| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| string (86 – 54.5k chars) | int64 (0 – 371) | string (87 – 49.2k chars) | int64 (0 – 349) | int64 (0 – 1) |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Recursive backtracking DFS that counts the paths from the top-left to the
    bottom-right of ``grid``, moving up/down/left/right and avoiding cells marked 1.

    >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    2
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
    doctest.testmod()
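    # Extra demo (not in the original file): a 3x3 grid with the centre cell
    # blocked has exactly two routes around the border, so this prints 2.
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))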
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key to the Flax convention and reshape the tensor if needed."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert the PyTorch tensors to numpy arrays
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")

        # also add unexpected weights so that a warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
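# Usage sketch (not part of the original file; assumes `flax_model` exposes
# `init_weights` and `pt_model` is the matching PyTorch module):
#
#     flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)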
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build a sequence from the first ten tokens of the vocabulary
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase_ = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = lowerCAmelCase_  # give the dict literal above a readable name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db", )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
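    # With the lazy module installed, heavy submodules are imported only on
    # first attribute access, e.g. (sketch):
    #     from transformers.models.time_series_transformer import TimeSeriesTransformerConfig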
def excel_title_to_column(column_title: str) -> int:
    """Return the Excel column number for ``column_title`` (e.g. "A" -> 1).

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
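    # Extra demo (not in the original file): "AB" is column 28.
    print(excel_title_to_column("AB"))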
import math


def perfect_square(num: int) -> bool:
    """Check if ``num`` is a perfect square via the built-in square root.

    >>> perfect_square(9)
    True
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if ``n`` is a perfect square using binary search.

    >>> perfect_square_binary_search(16)
    True
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
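    # Extra demo (not in the original file): both checks agree on non-squares too.
    print(perfect_square(10), perfect_square_binary_search(10))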
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
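# Usage sketch (not from the original file): re-check one pinned dependency on
# demand, e.g. before enabling an optional code path.
#     dep_version_check("tqdm")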
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak a fairseq BART checkpoint into a transformers BART model."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}")

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro–Winkler similarity between two strings (0.0 to 1.0).

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
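    # Extra demo (not in the original file): near-identical strings score high.
    print(jaro_winkler("martha", "marhta"))  # 0.9611111111111111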
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime.

    >>> is_prime(11)
    True
    >>> is_prime(12)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generate prime numbers in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 200_0000) -> int:
    """Return the sum of all the primes below ``n``."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F'''{solution() = }''')
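    # Extra demo (not in the original file): the primes below 10 (2, 3, 5, 7) sum to 17.
    print(solution(10))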
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the element at ``start`` down to restore the min-heap property
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Sift a decreased value up towards the root
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Prim's algorithm: return the edges of a minimum spanning tree."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
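# Usage sketch (not from the original file): pair a default config with its
# ONNX config to read the validation tolerance defined above.
#
#     config = MobileNetV2Config()
#     onnx_config = MobileNetV2OnnxConfig(config)
#     assert onnx_config.atol_for_validation == 1e-4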
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers.

    >>> solution(10)
    2640
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
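    # Extra demo (not in the original file): solution(10) == 3025 - 385 == 2640.
    print(solution(10))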
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Depth First Search on a graph, returning the set of visited vertices.

    >>> sorted(depth_first_search(G, "A"))
    ['A', 'B', 'C', 'D', 'E', 'F', 'G']
    """
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
lowerCamelCase__ = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
lowerCamelCase__ = '''▁'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = AlbertTokenizer
def __init__( self , _a=None , _a=None , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Tuple:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
lowerCAmelCase_ = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
lowerCAmelCase_ = do_lower_case
lowerCAmelCase_ = remove_space
lowerCAmelCase_ = keep_accents
lowerCAmelCase_ = vocab_file
lowerCAmelCase_ = False if not self.vocab_file else True
def __a ( self , _a , _a = None ) -> List[int]:
lowerCAmelCase_ = [self.sep_token_id]
lowerCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , _a , _a = None ) -> List[int]:
lowerCAmelCase_ = [self.sep_token_id]
lowerCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , _a , _a = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase_ = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
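# Usage sketch (assumption: the class above mirrors transformers'
# AlbertTokenizerFast; the import and checkpoint are illustrative):
#
# from transformers import AlbertTokenizerFast
# tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
# enc = tok("Hello world")
# # enc["input_ids"] starts with the [CLS] id and ends with the [SEP] id,
# # exactly as assembled by the single-sequence branch above.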
| 350 |
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first cur elements
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
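# Quick sanity checks: pancake sort only ever prefix-reverses the list, so the
# output must equal the built-in sort on any input.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([10, -5, 0, 7]) == sorted([10, -5, 0, 7])
assert pancake_sort([]) == []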
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 22 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
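# `get_duration` is imported from a local utils module that is not shown in
# this file. A minimal stand-in with the assumed contract (a decorator that
# runs the wrapped function and returns elapsed wall-clock seconds) would be:
#
# import functools, time
#
# def get_duration(func):
#     @functools.wraps(func)
#     def wrapper(*args, **kwargs):
#         start = time.time()
#         func(*args, **kwargs)
#         return time.time() - start
#     return wrapper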
| 351 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
| 22 | 0 |
lowerCamelCase__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 352 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
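# Quick usage example (preds/labels must support elementwise `==`, e.g. numpy
# arrays; scikit-learn must be installed):
#
# import numpy as np
# preds = np.array([0, 1, 1, 0])
# labels = np.array([0, 1, 0, 0])
# glue_compute_metrics("sst-2", preds, labels)  # -> {"acc": 0.75}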
| 22 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''openai-gpt'''
lowerCamelCase__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _a=40478 , _a=512 , _a=768 , _a=12 , _a=12 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1E-5 , _a=0.0_2 , _a="cls_index" , _a=True , _a=None , _a=True , _a=0.1 , **_a , ) -> int:
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = n_positions
lowerCAmelCase_ = n_embd
lowerCAmelCase_ = n_layer
lowerCAmelCase_ = n_head
lowerCAmelCase_ = afn
lowerCAmelCase_ = resid_pdrop
lowerCAmelCase_ = embd_pdrop
lowerCAmelCase_ = attn_pdrop
lowerCAmelCase_ = layer_norm_epsilon
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = summary_type
lowerCAmelCase_ = summary_use_proj
lowerCAmelCase_ = summary_activation
lowerCAmelCase_ = summary_first_dropout
lowerCAmelCase_ = summary_proj_to_labels
super().__init__(**_a )
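# Usage sketch (assumption: the class above mirrors transformers'
# OpenAIGPTConfig; the name below is illustrative, not defined in this file):
#
# config = OpenAIGPTConfig(n_layer=6, n_head=8, n_embd=512)
# config.num_hidden_layers  # -> 6, resolved through the attribute map above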
| 353 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ (__lowercase ):
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViTImageProcessor'''
lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , _a=None , _a=None , **_a ) -> Tuple:
lowerCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _a , )
lowerCAmelCase_ = kwargs.pop("feature_extractor" )
lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_a , _a )
def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict:
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a )
if visual_prompt is not None:
lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a )
if images is not None:
lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a )
if visual_prompt is not None and images is not None:
lowerCAmelCase_ = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCAmelCase_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCAmelCase_ = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def __a ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.batch_decode(*_a , **_a )
def __a ( self , *_a , **_a ) -> Optional[int]:
return self.tokenizer.decode(*_a , **_a )
@property
def __a ( self ) -> List[str]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , )
return self.image_processor_class
@property
def __a ( self ) -> Optional[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , )
return self.image_processor
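# Usage sketch (assumption: the class above mirrors transformers'
# CLIPSegProcessor; the import and checkpoint are illustrative):
#
# from transformers import CLIPSegProcessor
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=image, return_tensors="pt")
# # -> tokenizer fields (input_ids, attention_mask) plus pixel_values,
# #    following the text-and-images branch above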
| 22 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = ReformerTokenizer
lowerCamelCase__ = ReformerTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = True
def __a ( self ) -> List[Any]:
super().setUp()
lowerCAmelCase_ = ReformerTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> int:
lowerCAmelCase_ = "<s>"
lowerCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __a ( self ) -> int:
lowerCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_a ) , 1000 )
def __a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __a ( self ) -> Dict:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "I was born in 92000, and this is falsé."
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self , _a=15 ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
# Simple input
lowerCAmelCase_ = "This is a simple input"
lowerCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ = ("This is a simple input", "This is a pair")
lowerCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" )
# Simple input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" )
# Simple input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , )
# Pair input
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" )
# Pair input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" )
# Pair input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , )
def __a ( self ) -> str:
pass
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = ReformerTokenizer(_a , keep_accents=_a )
lowerCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __a ( self ) -> str:
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "Hello World!"
lowerCAmelCase_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __a ( self ) -> str:
lowerCAmelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowerCAmelCase_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@require_torch
@slow
def __a ( self ) -> Tuple:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
lowerCAmelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase_ = " ".join(_a )
lowerCAmelCase_ = self.big_tokenizer.encode_plus(_a , return_tensors="pt" )
lowerCAmelCase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
lowerCAmelCase_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
lowerCAmelCase_ = encoded_sequence["input_ids"].shape
lowerCAmelCase_ = ReformerModel(_a )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_a )
model(**_a )
@slow
def __a ( self ) -> List[str]:
# fmt: off
lowerCAmelCase_ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
lowerCAmelCase_ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=_a , sequences=_a , )
| 354 |
import datasets
lowerCamelCase__ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
lowerCamelCase__ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them relatively low-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
lowerCamelCase__ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 22 | 0 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
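# Consumption sketch (assumption, not part of the original file): p_starts and
# p_ends each hold, per query, a softmax over that query's token positions, so
# the most likely entity span can be read off with argmax:
#
# p_starts, p_ends = model(W_query, W_supports)
# start_idx = p_starts.argmax(dim=-1)
# end_idx = p_ends.argmax(dim=-1)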
| 355 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowerCamelCase__ = '''bert-base-cased'''
lowerCamelCase__ = '''google/pegasus-xsum'''
lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random'''
lowerCamelCase__ = '''sshleifer/bart-tiny-random'''
lowerCamelCase__ = '''sshleifer/tiny-mbart'''
lowerCamelCase__ = '''sshleifer/tiny-marian-en-de'''
def A(__a: Path , __a: list ):
lowerCAmelCase_ = "\n".join(__a )
Path(__a ).open("w" ).writelines(__a )
def A(__a: str ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__a , F"{split}.source" ) , __a )
_dump_articles(os.path.join(__a , F"{split}.target" ) , __a )
return tmp_dir
class __magic_name__ (__lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __a ( self , _a ) -> Dict:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_a , _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __a ( self , _a ) -> str:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = LegacySeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines()
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
lowerCAmelCase_ = {x.name for x in save_dir.iterdir()}
lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __a ( self ) -> Any:
if not FAIRSEQ_AVAILABLE:
return
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 )
lowerCAmelCase_ = 64
lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a )
lowerCAmelCase_ = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for batch in data_loader:
lowerCAmelCase_ = batch["input_ids"].shape
lowerCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCAmelCase_ = np.product(batch["input_ids"].shape )
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f"too many tokens in {len(_a )} batches" )
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 )
lowerCAmelCase_ = 2
lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
lowerCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_a , _a="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def __a ( self , _a=1000 , _a=128 ) -> str:
if os.getenv("USE_REAL_DATA" , _a ):
lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro"
lowerCAmelCase_ = max_len * 2 * 64
if not Path(_a ).joinpath("train.len" ).exists():
save_len_file(_a , _a )
else:
lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro"
lowerCAmelCase_ = max_len * 4
save_len_file(_a , _a )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset()
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
| 22 | 0 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ (__lowercase ):
def __a ( self ) -> Dict:
lowerCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , "embed_dim" ) )
self.parent.assertTrue(hasattr(_a , "num_heads" ) )
class __magic_name__ :
def __init__( self , _a , _a=13 , _a=64 , _a=3 , _a=[16, 48, 96] , _a=[1, 3, 6] , _a=[1, 2, 10] , _a=[7, 3, 3] , _a=[4, 2, 2] , _a=[2, 1, 1] , _a=[2, 2, 2] , _a=[False, False, True] , _a=[0.0, 0.0, 0.0] , _a=0.0_2 , _a=1E-12 , _a=True , _a=True , _a=2 , ) -> List[str]:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_sizes
lowerCAmelCase_ = patch_stride
lowerCAmelCase_ = patch_padding
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = stride_kv
lowerCAmelCase_ = depth
lowerCAmelCase_ = cls_token
lowerCAmelCase_ = attention_drop_rate
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def __a ( self ) -> Dict:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __a ( self , _a , _a , _a ) -> Optional[Any]:
lowerCAmelCase_ = CvtModel(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
lowerCAmelCase_ = (self.image_size, self.image_size)
lowerCAmelCase_ , lowerCAmelCase_ = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
lowerCAmelCase_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
lowerCAmelCase_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __a ( self , _a , _a , _a ) -> str:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = CvtForImageClassification(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase__ = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __a ( self ) -> str:
lowerCAmelCase_ = CvtModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def __a ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self ) -> Tuple:
return
@unittest.skip(reason="Cvt does not output attentions" )
def __a ( self ) -> Tuple:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __a ( self ) -> Tuple:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __a ( self ) -> str:
pass
def __a ( self ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_a )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def __a ( self ) -> str:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __a ( self ) -> Union[str, Any]:
def check_hidden_states_output(_a , _a , _a ):
lowerCAmelCase_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) )
lowerCAmelCase_ = outputs.hidden_states
lowerCAmelCase_ = len(self.model_tester.depth )
self.assertEqual(len(_a ) , _a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
check_hidden_states_output(_a , _a , _a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self ) -> List[Any]:
pass
@slow
def __a ( self ) -> int:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = CvtModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def A():
lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
@cached_property
def __a ( self ) -> int:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __a ( self ) -> List[str]:
lowerCAmelCase_ = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**_a )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
lowerCAmelCase_ = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
| 356 |
def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s  # covers the empty-array edge case
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
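# Worked example: [1, 6, 11, 5] sums to 23; the best split is 12 vs 11
# (e.g. {1, 5, 6} against {11}), so the minimum subset-sum difference is 1.
assert find_min([1, 6, 11, 5]) == 1
assert find_min([5, 5]) == 0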
| 22 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ = '''\
'''
lowerCamelCase__ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
lowerCamelCase__ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute( self , input_texts , model_id , batch_size = 16 , add_start_token = True , device=None ):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors="pt" , return_attention_mask=True , ).to(device )
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none" )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 357 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang ):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase_ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(__a , exist_ok=__a )
lowerCAmelCase_ = os.path.join(__a , "README.md" )
print(F"Generating {path}" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(__a )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix , src_lang , tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22 | 0 |
from itertools import permutations
def is_substring_divisible(num: tuple ) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10 ) -> int:
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
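# Worked example: the pandigital number 1406357289 from the problem statement
# passes every check above (num[3]=6 is even; num[2]+num[3]+num[4]=0+6+3 is a
# multiple of 3; num[5]=5; and 357, 572, 728, 289 are divisible by 7, 11, 13,
# 17 respectively):
#     is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))  # -> True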
if __name__ == "__main__":
print(F'''{solution() = }''')
| 358 |
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def A(__a: str ):
    __a = re.sub("<n>" , "" , __a )  # remove pegasus newline char and keep the result
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a ) )
| 22 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config" , description=_description )
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments" , "Arguments that can be configured through `accelerate config`." )
    config_args.add_argument(
        "--config_file" , type=str , default=None , help="Path to the config file to use for accelerate." , )
    config_args.add_argument(
        "--tpu_name" , default=None , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
    config_args.add_argument(
        "--tpu_zone" , default=None , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
    pod_args = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
    pod_args.add_argument(
        "--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
    pod_args.add_argument(
        "--command_file" , default=None , help="The path to the file containing the commands to run on the pod on startup." , )
    pod_args.add_argument(
        "--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
    pod_args.add_argument(
        "--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
    pod_args.add_argument(
        "--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
    pod_args.add_argument(
        "--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher(args ):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod." )
    if args.command_file:
        with open(args.command_file , "r" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [F"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"Running {' '.join(cmd )}" )
        return
    subprocess.run(cmd )
    print("Successfully setup pod." )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
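# For illustration, the launcher above ultimately assembles a command of this
# shape (TPU name, zone and training command are hypothetical):
#     gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#         --command "cd /usr/share; pip install accelerate -U; accelerate launch train.py" \
#         --worker all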
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class __magic_name__ (LayoutLMv2ImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 360 |
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class __magic_name__ (PretrainedConfig ):
    model_type = '''bertabs'''
    def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
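    # Minimal usage sketch (hypothetical values; any keyword can be overridden):
    #     config = __magic_name__(vocab_size=30522, dec_layers=6, dec_hidden_size=768)
    #     assert config.dec_hidden_size == 768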
| 22 | 0 |
lowerCamelCase__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCamelCase__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCamelCase__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 361 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint , config ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
        if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[F"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                F"encoder.down.{i}.downsample.conv.weight" )
            new_checkpoint[F"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                F"encoder.down.{i}.downsample.conv.bias" )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
        ]
        if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[F"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                F"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[F"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                F"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
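# For orientation, the conversion above maps LDM-style keys onto diffusers-style
# keys, for example (illustrative pairs, not an exhaustive list):
#     "encoder.down.0.block.0.norm1.weight" -> "encoder.down_blocks.0.resnets.0.norm1.weight"
#     "mid.attn_1.*"                        -> "mid_block.attentions.0.*"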
def vae_pt_to_vae_diffuser(checkpoint_path: str , output_path: str , ):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 22 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __magic_name__ (unittest.TestCase ):
    def setUp( self ) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ) -> None:
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __a ( self ) -> Any:
lowerCAmelCase_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase_ = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
lowerCAmelCase_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = image_processor(_a , return_tensors="np" )
lowerCAmelCase_ = processor(images=_a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = processor(text=_a )
lowerCAmelCase_ = tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __a ( self ) -> int:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a )
lowerCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ = processor.batch_decode(_a )
lowerCAmelCase_ = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __a ( self ) -> int:
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(text=_a , images=_a )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 362 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
lowerCamelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int] ) -> int:
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
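# Worked example: for the row [4, 3, 2, -1] the search returns index 3 (the
# position of the first negative entry), so the row contributes
# len(row) - 3 = 1 negative number to the total:
#     find_negative_index([4, 3, 2, -1])  # -> 3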
def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark() -> None:
from timeit import timeit
print("Running benchmarks" )
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"{func}(grid=grid)" , setup=setup , number=500 )
print(F"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 22 | 0 |
import functools
from typing import Any
def word_break(string: str , words: list[str] ) -> bool:
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("the string should be not empty string" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
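# Minimal usage sketch (classic word-break examples):
#     word_break("applepenapple", ["apple", "pen"])  # -> True
#     word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  # -> False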
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 363 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def A(__a: Dict ):
lowerCAmelCase_ = r"\w+[.]\d+"
lowerCAmelCase_ = re.findall(__a , __a )
for pat in pats:
lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) )
return key
def A(__a: str , __a: Tuple , __a: List[Any] ):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A(__a: Dict , __a: Any , __a: List[Any]=42 ):
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) )
lowerCAmelCase_ = flatten_dict(__a )
lowerCAmelCase_ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = rename_key(__a )
lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(__a )
return unflatten_dict(__a )
| 22 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC ):
    def __init__( self , path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ) -> None:
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths , dict ) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC ):
    def __init__( self , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self ) -> Union[Dataset, IterableDataset]:
        pass
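# A minimal concrete subclass sketch (hypothetical, for illustration only; real
# readers such as the csv/json readers construct a DatasetBuilder instead):
#     class InMemoryListReader(AbstractDatasetInputStream):
#         def __init__(self, rows, **kwargs):
#             super().__init__(**kwargs)
#             self.rows = rows
#         def read(self):
#             return Dataset.from_list(self.rows)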
| 364 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __magic_name__ (PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
'''negative_prompt''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
'''prompt_embeds''',
}
    required_optional_params = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_inference_text2img( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
    def tearDown( self ) -> None:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_ldm_default_ddim( self ):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
        max_diff = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
    def tearDown( self ) -> None:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_ldm_default_ddim( self ):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
        max_diff = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 365 |
import math
def perfect_square(num: int ) -> bool:
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search(n: int ) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
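# Note: the float-based check above can misfire for very large integers because
# math.sqrt is inexact; the binary-search variant avoids floats entirely.
#     perfect_square(9)                  # -> True
#     perfect_square_binary_search(10)   # -> False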
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __magic_name__ (BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders( self ):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 366 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg , hint=None ):
    require_version(deps[pkg] , hint )
| 22 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
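# Illustrative scheduler swap, a common use of this module (the model id is an
# assumption for the example, not something this file prescribes):
#     from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)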
| 367 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location="cpu" )
    hub_interface = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval()
    hub_interface.model.load_state_dict(sd["model"] )
    return hub_interface
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load("pytorch/fairseq" , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace("." , "-" )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors="pt" ).unsqueeze(0 )
    if not torch.eq(tokens , tokens2 ).all():
        raise ValueError(
            F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}" )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict("mnli" , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , "lm_head" ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
lowerCamelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 22 | 0 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid: np.ndarray , source: tuple[int, int] , destination: tuple[int, int] , allow_diagonal: bool , ):
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist , (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
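# Minimal usage sketch: cells equal to 1 are walkable, 0s are walls.
#     grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#     dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#     # dist -> 4.0, path -> [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]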
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __magic_name__ (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
    def setUp( self ) -> None:
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts( self , tokenizer ):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __a ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ = {}
for i, token in enumerate(_a ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Dict:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
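# A minimal sketch of the greedy longest-match-first WordPiece algorithm the
# tests above exercise ("##" marks a continuation piece; an unmatched span
# collapses the whole word to the unknown token). This is an illustration of
# the idea, not the transformers implementation.
def wordpiece_sketch(word, vocab, unk_token="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk_token]  # any unmatched span makes the whole word unknown
        tokens.append(cur)
        start = end
    return tokens


assert wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]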
| 22 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = CTRLTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = False
def __a ( self ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
lowerCAmelCase_ = dict(zip(_a , range(len(_a ) ) ) )
lowerCAmelCase_ = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
lowerCAmelCase_ = {"unk_token": "<unk>"}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_a ) )
def __a ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_a )
def __a ( self , _a ) -> List[Any]:
lowerCAmelCase_ = "adapt react readapt apt"
lowerCAmelCase_ = "adapt react readapt apt"
return input_text, output_text
def __a ( self ) -> str:
lowerCAmelCase_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ = "adapt react readapt apt"
lowerCAmelCase_ = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
lowerCAmelCase_ = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
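# A rough sketch of greedy BPE merging under a CTRL-style merge list like the
# fixture above (the lowest-ranked adjacent pair merges first; "</w>" marks
# end of word). Illustrative only -- not CTRLTokenizer's actual implementation.
def bpe_sketch(word, merges):
    ranks = {tuple(m.split()): r for r, m in enumerate(merges) if m and not m.startswith("#")}
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        candidates = [(ranks.get(pair, float("inf")), i) for i, pair in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(candidates)
        if best_rank == float("inf"):
            break  # no mergeable pair left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols


assert bpe_sketch("adapt", ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>"]) == ["adapt</w>"]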
| 369 |
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 200_0000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(F'''{solution() = }''')
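# Spot checks for the helpers above: the generator's first values and the
# classic "sum of primes below 10" value (2 + 3 + 5 + 7 = 17).
from itertools import islice

assert list(islice(prime_generator(), 5)) == [2, 3, 5, 7, 11]
assert solution(10) == 17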
| 22 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
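# A small helper capturing the comparison pattern the tests above repeat (an
# illustration, not part of diffusers): two runs from the same manual seed
# should agree to within a loose absolute tolerance.
def outputs_close(a, b, tol=1e-2):
    return np.abs(np.asarray(a).flatten() - np.asarray(b).flatten()).max() < tol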
| 370 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
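# A sketch of the channel-rounding rule that `depth_multiplier`,
# `depth_divisible_by` and `min_depth` control in MobileNet-style models:
# scaled widths round to the nearest multiple of the divisor without dropping
# more than ~10% below the unrounded value. This mirrors the well-known TF
# `make_divisible` helper; treat it as illustrative, not the exact
# transformers implementation.
def make_divisible(value, divisor=8, min_value=8):
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value


assert make_divisible(32 * 1.4) == 48  # a 1.4x multiplier widens 32 channels to 48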
| 22 | 0 |
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(F'''{solution() = }''')
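# Sanity check on the distribution helper above: the frequencies over all
# ordered rolls of two 4-sided dice must sum to 4**2 = 16 outcomes.
assert sum(total_frequency_distribution(sides_number=4, dice_number=2)) == 4**2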
| 371 |
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
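# For contrast with the comments above, the BFS counterpart: a deque popped
# from the left visits nodes level by level instead of branch-first.
from collections import deque


def breadth_first_search(graph: dict, start: str) -> set:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored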
| 22 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech,
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
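# The log-mel post-processing above, restated in isolation for clarity (a
# paraphrase of the steps in _np_extract_fbank_features, not the exact code
# path): clamp to within 8 dB of the peak, then map roughly into [-1, 1].
def normalize_log_mel(log_spec):
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
    return (log_spec + 4.0) / 4.0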
| 350 |
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first cur elements, moving the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
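# Quick check: pancake sort only ever reverses prefixes (two flips per round),
# so it uses O(n^2) comparisons but only O(n) reversals.
assert pancake_sort([3, 1, 5, 2, 4]) == [1, 2, 3, 4, 5]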
| 22 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noisy_samples = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noisy_samples + original_samples
        return noisy_samples

    def __len__(self) -> int:
        return self.config.num_train_timesteps
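# The sigma schedule used in set_sigmas, stated on its own: a geometric ramp
# from sigma_min to sigma_max, equivalent in spirit to
# torch.exp(torch.linspace(log(sigma_min), log(sigma_max), n)). Illustrative
# helper, not part of the scheduler API; requires n >= 2.
def geometric_sigmas(sigma_min, sigma_max, n):
    return [sigma_min * (sigma_max / sigma_min) ** (i / (n - 1)) for i in range(n)]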
| 351 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
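# Worked example tying the three stages together (toy corpus, illustrative
# numbers): "document" appears once in the query document and in 2 of 2 docs,
# so idf = log10(2 / 2) = 0 and the tf-idf score is 0.0.
_corpus = "this is the first document\nthis document is the second document"
_tf = term_frequency("document", "this is the first document")
_df, _n = document_frequency("document", _corpus)
assert tf_idf(_tf, inverse_document_frequency(_df, _n)) == 0.0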
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 352 |
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
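if __name__ == "__main__":
    # Worked numbers for the "mrpc" branch above, computed by hand (sklearn-free)
    # so the {"acc", "f1", "acc_and_f1"} mapping is concrete; toy, illustrative data.
    import numpy as np

    demo_preds = np.array([1, 0, 1, 1])
    demo_labels = np.array([1, 0, 0, 1])
    acc = float((demo_preds == demo_labels).mean())  # 0.75
    tp, fp, fn = 2, 1, 0                             # counted from the arrays above
    f1 = 2 * tp / (2 * tp + fp + fn)                 # 0.8
    print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})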
| 22 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 353 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 22 | 0 |
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
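# The first ten Hamming numbers, as a quick check of the three-pointer merge above:
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]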
| 354 |
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 22 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # The "text" column is read as string by default; other dtypes must be requested via `features`
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
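# Self-contained illustration (assumes the `datasets` package is installed) of
# the contract the assertions above encode: each line of a text file becomes
# one row of a single "text" column, hence num_rows == 4 and ["text"].
if __name__ == "__main__":
    import tempfile
    from pathlib import Path

    from datasets import load_dataset

    with tempfile.TemporaryDirectory() as d:
        p = Path(d) / "sample.txt"
        p.write_text("one\ntwo\nthree\nfour\n")
        ds = load_dataset("text", data_files=str(p), split="train")
        assert ds.num_rows == 4 and ds.column_names == ["text"]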
| 355 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __a ( self , _a ) -> Dict:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_a , _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __a ( self , _a ) -> str:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = LegacySeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines()
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
lowerCAmelCase_ = {x.name for x in save_dir.iterdir()}
lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __a ( self ) -> Any:
if not FAIRSEQ_AVAILABLE:
return
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 )
lowerCAmelCase_ = 64
lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a )
lowerCAmelCase_ = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for batch in data_loader:
lowerCAmelCase_ = batch["input_ids"].shape
lowerCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCAmelCase_ = np.product(batch["input_ids"].shape )
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f"too many tokens in {len(_a )} batches" )
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 )
lowerCAmelCase_ = 2
lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
lowerCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_a , _a="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def __a ( self , _a=1000 , _a=128 ) -> str:
if os.getenv("USE_REAL_DATA" , _a ):
lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro"
lowerCAmelCase_ = max_len * 2 * 64
if not Path(_a ).joinpath("train.len" ).exists():
save_len_file(_a , _a )
else:
lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro"
lowerCAmelCase_ = max_len * 4
save_len_file(_a , _a )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset()
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
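# A minimal sketch of what mBART's `shift_tokens_right` does (used in the test
# near the top of this class): rotate the last non-pad token of each row to
# position 0 and shift the rest right by one. A paraphrase for clarity, not
# the transformers implementation.
import torch


def shift_tokens_right_sketch(input_ids, pad_token_id):
    prev_output_tokens = input_ids.clone()
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = input_ids[:, :-1]
    return prev_output_tokens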
| 22 | 0 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = TypeVar('''DatasetType''', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 356 |
def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    # a sum of 0 is always reachable; no positive sum is reachable with 0 items
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # either skip arr[i - 1], or include it when it fits
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
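# Example: [1, 6, 11, 5] splits best as {1, 5, 6} vs {11}, so the minimum
# difference between the two subset sums is |12 - 11| = 1.
assert find_min([1, 6, 11, 5]) == 1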
| 22 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # with padding, both processes see the same number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # with padding, both processes also see the same batch sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )
def A():
lowerCAmelCase_ = create_accelerator(even_batches=__a )
lowerCAmelCase_ = torch.nn.Linear(1 , 1 )
lowerCAmelCase_ = accelerator.prepare(__a )
lowerCAmelCase_ = create_dataloader(__a , dataset_size=3 , batch_size=1 )
lowerCAmelCase_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__a ):
lowerCAmelCase_ = ddp_model(batch[0].float() )
lowerCAmelCase_ = output.sum()
loss.backward()
batch_idxs.append(__a )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
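# Context for the asserts above (a paraphrase, not code from the original
# file): with even_batches=False, 3 samples at batch_size=1 across two
# processes give process 0 two batches and process 1 one batch;
# `join_uneven_inputs` wraps PyTorch DDP's uneven-input join mechanism so the
# gradient sync on the shared model does not deadlock when process 1 runs dry.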
def A(__a: List[str] ):
with warnings.catch_warnings(record=__a ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __a )
assert "only supported for multi-GPU" in str(w[-1].message )
def A():
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = create_accelerator(even_batches=__a )
lowerCAmelCase_ = torch.nn.Linear(1 , 1 )
lowerCAmelCase_ = accelerator.prepare(__a )
lowerCAmelCase_ = create_dataloader(__a , dataset_size=3 , batch_size=1 )
lowerCAmelCase_ = create_dataloader(__a , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ):
lowerCAmelCase_ = train_dl.batch_sampler.even_batches
lowerCAmelCase_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def A():
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = create_accelerator(even_batches=__a )
lowerCAmelCase_ = torch.nn.Linear(1 , 1 )
lowerCAmelCase_ = accelerator.prepare(__a )
create_dataloader(__a , dataset_size=3 , batch_size=1 , iterable=__a )
lowerCAmelCase_ = create_dataloader(__a , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ):
lowerCAmelCase_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def A():
lowerCAmelCase_ = create_accelerator()
lowerCAmelCase_ = torch.nn.Linear(1 , 1 )
lowerCAmelCase_ = accelerator.prepare(__a )
create_dataloader(__a , dataset_size=3 , batch_size=1 , iterable=__a )
with warnings.catch_warnings(record=__a ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ):
pass
assert issubclass(w[-1].category , __a )
assert "only supported for map-style datasets" in str(w[-1].message )
def A():
lowerCAmelCase_ = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
lowerCAmelCase_ = accelerator.state.distributed_type
lowerCAmelCase_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__a )
lowerCAmelCase_ = original_state
if __name__ == "__main__":
main()
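# One way to launch this test script on a 2-GPU machine (the file name is a
# placeholder):
#
#   accelerate launch --multi_gpu --num_processes 2 test_even_batches.py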
| 357 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def A(__a: Any , __a: Union[str, Any] , __a: List[str] ):
lowerCAmelCase_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCAmelCase_ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase_ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(__a , exist_ok=__a )
lowerCAmelCase_ = os.path.join(__a , "README.md" )
print(F"Generating {path}" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(__a )
# make sure we are under the root of the project
lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''')
lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
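# With the defaults above, running this script writes four cards relative to
# the repository that contains it, e.g. model_cards/facebook/wmt19-ru-en/README.md.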
| 22 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __a ( *_a , **_a ) -> Tuple:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ (unittest.TestCase ):
lowerCamelCase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __a ( self , _a , _a , _a ) -> Union[str, Any]:
lowerCAmelCase_ = ObjectDetectionPipeline(model=_a , image_processor=_a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __a ( self , _a , _a ) -> Optional[Any]:
lowerCAmelCase_ = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(_a ) , 0 )
for detected_object in outputs:
self.assertEqual(
_a , {
"score": ANY(_a ),
"label": ANY(_a ),
"box": {"xmin": ANY(_a ), "ymin": ANY(_a ), "xmax": ANY(_a ), "ymax": ANY(_a )},
} , )
import datasets
lowerCAmelCase_ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
lowerCAmelCase_ = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
lowerCAmelCase_ = object_detector(_a , threshold=0.0 )
self.assertEqual(len(_a ) , len(_a ) )
for outputs in batch_outputs:
self.assertGreater(len(_a ) , 0 )
for detected_object in outputs:
self.assertEqual(
_a , {
"score": ANY(_a ),
"label": ANY(_a ),
"box": {"xmin": ANY(_a ), "ymin": ANY(_a ), "xmax": ANY(_a ), "ymax": ANY(_a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __a ( self ) -> Union[str, Any]:
pass
@require_torch
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = "hf-internal-testing/tiny-detr-mobilenetsv3"
lowerCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_a )
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(_a )
lowerCAmelCase_ = ObjectDetectionPipeline(model=_a , feature_extractor=_a )
lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
lowerCAmelCase_ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __a ( self ) -> int:
lowerCAmelCase_ = "facebook/detr-resnet-50"
lowerCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_a )
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(_a )
lowerCAmelCase_ = ObjectDetectionPipeline(model=_a , feature_extractor=_a )
lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
lowerCAmelCase_ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __a ( self ) -> Tuple:
lowerCAmelCase_ = "facebook/detr-resnet-50"
lowerCAmelCase_ = pipeline("object-detection" , model=_a )
lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
lowerCAmelCase_ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __a ( self ) -> List[str]:
lowerCAmelCase_ = 0.9_9_8_5
lowerCAmelCase_ = "facebook/detr-resnet-50"
lowerCAmelCase_ = pipeline("object-detection" , model=_a )
lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=_a )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __a ( self ) -> Tuple:
lowerCAmelCase_ = "Narsil/layoutlmv3-finetuned-funsd"
lowerCAmelCase_ = 0.9_9_9_3
lowerCAmelCase_ = pipeline("object-detection" , model=_a , threshold=_a )
lowerCAmelCase_ = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
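# Stand-alone usage of the pipeline exercised above (a sketch; the checkpoint
# downloads on first run):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> a list of {"score", "label", "box"} dicts, as asserted in the tests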
| 358 |
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def A(__a: str ):
    lowerCAmelCase_ = re.sub("<n>" , "" , __a )  # remove pegasus newline char (re.sub returns a new string; the result must be kept)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowerCAmelCase_ ) )
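# Quick sanity check for the helper above (my own example sentence pair):
if __name__ == "__main__" and NLTK_AVAILABLE:
    assert A("Hello there. General Kenobi." ) == "Hello there.\nGeneral Kenobi."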
| 22 | 0 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowerCamelCase__ = '''bert-base-cased'''
lowerCamelCase__ = '''google/pegasus-xsum'''
lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random'''
lowerCamelCase__ = '''sshleifer/bart-tiny-random'''
lowerCamelCase__ = '''sshleifer/tiny-mbart'''
lowerCamelCase__ = '''sshleifer/tiny-marian-en-de'''
def A(__a: Path , __a: list ):
lowerCAmelCase_ = "\n".join(__a )
Path(__a ).open("w" ).writelines(__a )
def A(__a: str ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__a , F"{split}.source" ) , __a )
_dump_articles(os.path.join(__a , F"{split}.target" ) , __a )
return tmp_dir
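# The fixture above leaves six line-aligned files in tmp_dir -- train/val/test
# each with a .source and a .target file holding the ARTICLES and SUMMARIES
# lines defined at module level.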
class __magic_name__ (__lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __a ( self , _a ) -> Dict:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_a , _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __a ( self , _a ) -> str:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = LegacySeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines()
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
lowerCAmelCase_ = {x.name for x in save_dir.iterdir()}
lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __a ( self ) -> Any:
if not FAIRSEQ_AVAILABLE:
return
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 )
lowerCAmelCase_ = 64
lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a )
lowerCAmelCase_ = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for batch in data_loader:
lowerCAmelCase_ = batch["input_ids"].shape
lowerCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCAmelCase_ = np.product(batch["input_ids"].shape )
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f"too many tokens in {len(_a )} batches" )
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 )
lowerCAmelCase_ = 2
lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
lowerCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_a , _a="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def __a ( self , _a=1000 , _a=128 ) -> str:
if os.getenv("USE_REAL_DATA" , _a ):
lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro"
lowerCAmelCase_ = max_len * 2 * 64
if not Path(_a ).joinpath("train.len" ).exists():
save_len_file(_a , _a )
else:
lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro"
lowerCAmelCase_ = max_len * 4
save_len_file(_a , _a )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset()
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
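# Note on the lazy-import pattern above: importing the package stays cheap
# because _LazyModule defers the submodule imports; e.g. (a sketch):
#
#   from transformers import EncodecConfig   # no torch import yet
#   from transformers import EncodecModel    # first access pulls in modeling_encodec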
| 22 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def A():
lowerCAmelCase_ = cn.convert_to_negative(__a )
# assert negative_img array for at least one True
assert negative_img.any()
def A():
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__a , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def A():
lowerCAmelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def A():
lowerCAmelCase_ = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase_ = canny.canny(__a )
# assert canny array for at least one True
assert canny_array.any()
def A():
assert gg.gaussian_filter(__a , 5 , sigma=0.9 ).all()
def A():
# laplace diagonals
lowerCAmelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase_ = conv.img_convolve(__a , __a ).astype(__a )
assert res.any()
def A():
assert med.median_filter(__a , 3 ).any()
def A():
lowerCAmelCase_ , lowerCAmelCase_ = sob.sobel_filter(__a )
assert grad.any() and theta.any()
def A():
lowerCAmelCase_ = sp.make_sepia(__a , 20 )
assert sepia.all()
def A(__a: str = "digital_image_processing/image_data/lena_small.jpg" ):
lowerCAmelCase_ = bs.Burkes(imread(__a , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def A(__a: str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCAmelCase_ = rs.NearestNeighbour(imread(__a , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def A():
lowerCAmelCase_ = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
lowerCAmelCase_ = imread(__a , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0
lowerCAmelCase_ = image[x_coordinate][y_coordinate]
lowerCAmelCase_ = lbp.get_neighbors_pixel(
__a , __a , __a , __a )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase_ = lbp.local_binary_value(__a , __a , __a )
assert lbp_image.any()
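# For reference, a self-contained sketch of the per-pixel local binary pattern
# value computed in the loop above (my own illustration; the implementation
# under test lives in digital_image_processing.filters.local_binary_pattern):
def _lbp_value_sketch(image, x, y):
    # Visit the 8 neighbours clockwise from the top-left corner; set bit k when
    # the neighbour is at least as bright as the centre pixel.
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    center = image[x][y]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1] and image[nx][ny] >= center:
            value |= 1 << bit
    return value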
| 360 |
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''bertabs'''
def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]:
super().__init__(**_a )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_pos
lowerCAmelCase_ = enc_layers
lowerCAmelCase_ = enc_hidden_size
lowerCAmelCase_ = enc_heads
lowerCAmelCase_ = enc_ff_size
lowerCAmelCase_ = enc_dropout
lowerCAmelCase_ = dec_layers
lowerCAmelCase_ = dec_hidden_size
lowerCAmelCase_ = dec_heads
lowerCAmelCase_ = dec_ff_size
lowerCAmelCase_ = dec_dropout
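# For context: upstream (transformers research_projects/bertabs) this class is
# BertAbsConfig, a PretrainedConfig with model_type "bertabs"; the defaults
# above describe the 6-layer encoder / 6-layer decoder of the
# bertabs-finetuned-cnndm checkpoint in the archive map.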
| 22 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=False , _a=False , _a=False , _a=2 , _a=99 , _a=0 , _a=32 , _a=5 , _a=4 , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.0_2 , _a=2 , _a=4 , _a="last" , _a=True , _a=None , _a=0 , ) -> Union[str, Any]:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_lengths
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = gelu_activation
lowerCAmelCase_ = sinusoidal_embeddings
lowerCAmelCase_ = causal
lowerCAmelCase_ = asm
lowerCAmelCase_ = n_langs
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = n_special
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = summary_type
lowerCAmelCase_ = use_proj
lowerCAmelCase_ = scope
lowerCAmelCase_ = bos_token_id
def __a ( self ) -> Dict:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_input_lengths:
lowerCAmelCase_ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self ) -> List[str]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]:
lowerCAmelCase_ = XLMModel(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , lengths=_a , langs=_a )
lowerCAmelCase_ = model(_a , langs=_a )
lowerCAmelCase_ = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Optional[int]:
lowerCAmelCase_ = XLMWithLMHeadModel(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[Any]:
lowerCAmelCase_ = XLMForQuestionAnsweringSimple(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
lowerCAmelCase_ = model(_a , start_positions=_a , end_positions=_a )
lowerCAmelCase_ = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Optional[Any]:
lowerCAmelCase_ = XLMForQuestionAnswering(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
lowerCAmelCase_ = model(
_a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , p_mask=_a , )
lowerCAmelCase_ = model(
_a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , )
((lowerCAmelCase_ ) , ) = result_with_labels.to_tuple()
lowerCAmelCase_ = model(_a , start_positions=_a , end_positions=_a )
((lowerCAmelCase_ ) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
lowerCAmelCase_ = XLMForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
lowerCAmelCase_ = model(_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Any:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = XLMForTokenClassification(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Tuple:
lowerCAmelCase_ = self.num_choices
lowerCAmelCase_ = XLMForMultipleChoice(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self ) -> Dict:
lowerCAmelCase_ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) ,
) = config_and_inputs
lowerCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class __magic_name__ (__lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCamelCase__ = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self , _a , _a , _a , _a , _a ) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self , _a , _a , _a=False ) -> str:
lowerCAmelCase_ = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
return inputs_dict
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = XLMModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_a , emb_dim=37 )
def __a ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_a )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_a )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_a )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_a )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_a )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_a )
def __a ( self , _a , _a , _a , _a , _a , _a=False , _a=1 ) -> List[str]:
self.assertIsInstance(_a , _a )
self.assertListEqual(
[isinstance(_a , _a ) for iter_attentions in attentions] , [True] * len(_a ) )
self.assertEqual(len(_a ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_a ):
# adds PAD dummy token
lowerCAmelCase_ = min_length + idx + 1
lowerCAmelCase_ = min_length + idx + 1
lowerCAmelCase_ = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_a ) )
def __a ( self , _a , _a , _a , _a , _a , _a=False , _a=1 ) -> str:
self.assertIsInstance(_a , _a )
self.assertListEqual(
[isinstance(_a , _a ) for iter_hidden_states in hidden_states] , [True] * len(_a ) , )
self.assertEqual(len(_a ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_a ):
# adds PAD dummy token
lowerCAmelCase_ = min_length + idx + 1
lowerCAmelCase_ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_a ) , )
pass
@slow
def __a ( self ) -> Dict:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = XLMModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __magic_name__ (unittest.TestCase ):
@slow
def __a ( self ) -> List[str]:
lowerCAmelCase_ = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(_a )
lowerCAmelCase_ = torch.tensor([[14, 447]] , dtype=torch.long , device=_a ) # the president
lowerCAmelCase_ = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowerCAmelCase_ = model.generate(_a , do_sample=_a )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _a )
| 361 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A(__a: Tuple , __a: Union[str, Any] ):
lowerCAmelCase_ = checkpoint
lowerCAmelCase_ = {}
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["quant_conv.bias"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a )
}
for i in range(__a ):
lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
for i in range(__a ):
lowerCAmelCase_ = num_up_blocks - 1 - i
lowerCAmelCase_ = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
return new_checkpoint
def A(__a: str , __a: str , ):
# Only support V1
    lowerCAmelCase_ = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
lowerCAmelCase_ = io.BytesIO(r.content )
lowerCAmelCase_ = OmegaConf.load(__a )
lowerCAmelCase_ = 512
lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
lowerCAmelCase_ = {}
with safe_open(__a , framework="pt" , device="cpu" ) as f:
for key in f.keys():
lowerCAmelCase_ = f.get_tensor(__a )
else:
lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"]
# Convert the VAE model.
lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a )
lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a )
lowerCAmelCase_ = AutoencoderKL(**__a )
vae.load_state_dict(__a )
vae.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
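# Example invocation (the script name and paths are placeholders):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae.pt --dump_path ./vae_diffusers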
| 22 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = RobertaTokenizer
lowerCamelCase__ = RobertaTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = {'''cls_token''': '''<s>'''}
def __a ( self ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCAmelCase_ = dict(zip(_a , range(len(_a ) ) ) )
lowerCAmelCase_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ = {"unk_token": "<unk>"}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_a ) )
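    # Worked example of the toy byte-level BPE written above: a leading space is
    # encoded as "\u0120", so "lower newer" pre-tokenizes to "lower" + "\u0120newer";
    # with the merges listed, that tokenizes to
    # ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"], exactly what the
    # first tokenization test below asserts.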
def __a ( self , **_a ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def __a ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __a ( self , _a ) -> Dict:
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = "lower newer"
return input_text, output_text
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ = tokenizer.tokenize(_a ) # , add_prefix_space=True)
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def __a ( self ) -> int:
lowerCAmelCase_ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=_a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=_a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def __a ( self ) -> int:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("roberta-base" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode(
"sequence builders" , add_special_tokens=_a , add_prefix_space=_a )
lowerCAmelCase_ = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=_a , add_prefix_space=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = "Encode this sequence."
lowerCAmelCase_ = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_a , _a )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_a , _a )
# Testing spaces after special tokens
lowerCAmelCase_ = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(_a , lstrip=_a , rstrip=_a )} ) # mask token has a left space
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a )
lowerCAmelCase_ = "Encode <mask> sequence"
lowerCAmelCase_ = "Encode <mask>sequence"
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = encoded.index(_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = encoded.index(_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_a , _a )
def __a ( self ) -> Any:
pass
def __a ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = "A, <mask> AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
lowerCAmelCase_ = tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __a ( self ) -> Any:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , _a )
self.assertEqual(post_processor_state["add_prefix_space"] , _a )
self.assertEqual(post_processor_state["trim_offsets"] , _a )
def __a ( self ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase_ = f"{text_of_1_token} {text_of_1_token}"
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
| 362 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases: an empty array, or a first value that is already negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # mid is the first negative index when num is negative and the previous
        # element is non-negative.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers, so return the length of the array.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
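# Cross-check of the three strategies on a small hand-verified grid (a minimal
# sketch; the expected count of 8 negatives was worked out by hand):
_demo_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert (
    count_negatives_binary_search(_demo_grid)
    == count_negatives_brute_force(_demo_grid)
    == count_negatives_brute_force_with_break(_demo_grid)
    == 8
)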
def benchmark() -> None:
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 22 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 363 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def A(__a: Dict ):
lowerCAmelCase_ = r"\w+[.]\d+"
lowerCAmelCase_ = re.findall(__a , __a )
for pat in pats:
lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) )
return key
def A(__a: str , __a: Tuple , __a: List[Any] ):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A(__a: Dict , __a: Any , __a: List[Any]=42 ):
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) )
lowerCAmelCase_ = flatten_dict(__a )
lowerCAmelCase_ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = rename_key(__a )
lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(__a )
return unflatten_dict(__a )
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
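# A minimal standalone sketch of what the lazy-module pattern above does
# (hypothetical analogue, not the actual transformers ``_LazyModule``): names
# listed in ``_import_structure`` are resolved to their submodules only on
# first attribute access, keeping package import cheap.
import importlib
import types
class _LazyDemoModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map every exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, name: str):
        if name not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(f".{self._name_to_module[name]}", self.__name__)
        return getattr(submodule, name)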
| 364 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 22 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def __a ( self ) -> List[str]:
lowerCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(_a ) , 101122 )
def __a ( self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def __a ( self ) -> int:
lowerCAmelCase_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowerCAmelCase_ = [0, 57, 3018, 70307, 91, 2]
lowerCAmelCase_ = self.tokenizer(
_a , max_length=len(_a ) , padding=_a , truncation=_a , return_tensors="pt" )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
lowerCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(_a , _a )
def __a ( self ) -> int:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "I was born in 92000, and this is falsé."
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
@slow
def __a ( self ) -> Dict:
# fmt: off
lowerCAmelCase_ = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
lowerCAmelCase_ = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=_a , )
| 365 |
import math
def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
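# Quick sanity check (a minimal sketch): both variants agree on small inputs.
# The binary-search version also sidesteps floating-point rounding, which the
# math.sqrt product check can suffer from on very large integers.
assert perfect_square(25) and perfect_square_binary_search(25)
assert not perfect_square_binary_search(26)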
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
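# Usage sketch: the shim behaves exactly like BeitImageProcessor but warns on
# construction, e.g.
#     extractor = BeitFeatureExtractor()  # emits a FutureWarning, then works as before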
| 366 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 22 | 0 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    # An empty digit string means the input was 0; hex(0) is "0x0".
    hexadecimal = "0x" + (hexadecimal or "0")
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
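# Quick demo (agrees with Python's built-in ``hex`` for these inputs):
assert decimal_to_hexadecimal(255) == hex(255) == "0xff"
assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"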
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old_key, new_key):
    val = dct.pop(old_key)
    dct[new_key] = val
def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
if not os.path.exists(__a ):
lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval()
else:
lowerCAmelCase_ = load_xsum_checkpoint(__a )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
lowerCAmelCase_ = checkpoint_path.replace("." , "-" )
lowerCAmelCase_ = BartConfig.from_pretrained(__a )
lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 )
lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 )
if not torch.eq(__a , __a ).all():
raise ValueError(
F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" )
if checkpoint_path == "bart.large.mnli":
lowerCAmelCase_ = bart.state_dict()
remove_ignore_keys_(__a )
lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"]
for src, dest in mnli_rename_keys:
rename_key(__a , __a , __a )
lowerCAmelCase_ = BartForSequenceClassification(__a ).eval()
model.load_state_dict(__a )
lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a )
lowerCAmelCase_ = model(__a )[0] # logits
else: # no classification heads to worry about
lowerCAmelCase_ = bart.model.state_dict()
remove_ignore_keys_(__a )
lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"]
lowerCAmelCase_ = bart.extract_features(__a )
if hf_checkpoint_name == "facebook/bart-large":
lowerCAmelCase_ = BartModel(__a ).eval()
model.load_state_dict(__a )
lowerCAmelCase_ = model(__a ).model[0]
else:
lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt
model.model.load_state_dict(__a )
if hasattr(__a , "lm_head" ):
lowerCAmelCase_ = make_linear_from_emb(model.model.shared )
lowerCAmelCase_ = model.model(__a )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
lowerCamelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
def __a ( self ) -> Optional[Any]:
super().setUp()
lowerCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowerCAmelCase_ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __a ( self , _a ) -> Any:
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = "unwanted, running"
return input_text, output_text
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __a ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ = {}
for i, token in enumerate(_a ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Dict:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False
lowerCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = ["的", "人", "有"]
lowerCAmelCase_ = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 22 | 0 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent
    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
    def setUp(self) -> None:
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
@property
def __a ( self ) -> Optional[int]:
return self.feature_extract_tester.prepare_feat_extract_dict()
def __a ( self ) -> Dict:
# Initialize feature_extractor
lowerCAmelCase_ = self.feature_extraction_class()
# Test not batched input
lowerCAmelCase_ = get_html_strings()[0]
lowerCAmelCase_ = feature_extractor(_a )
# fmt: off
lowerCAmelCase_ = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
lowerCAmelCase_ = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
self.assertEqual(encoding.nodes , _a )
self.assertEqual(encoding.xpaths , _a )
# Test batched
lowerCAmelCase_ = get_html_strings()
lowerCAmelCase_ = feature_extractor(_a )
# fmt: off
lowerCAmelCase_ = expected_nodes + [["My First Heading", "My first paragraph."]]
lowerCAmelCase_ = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , _a )
self.assertEqual(encoding.xpaths , _a )
| 369 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
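# Sanity check (a minimal sketch): the primes below 10 are 2, 3, 5 and 7,
# which sum to 17.
assert solution(10) == 17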
if __name__ == "__main__":
print(F'''{solution() = }''')
| 22 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
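# Usage sketch (assumes transformers is installed; BertConfig is just an
# illustrative choice of encoder/decoder):
#     from transformers import BertConfig
#     enc, dec = BertConfig(), BertConfig()
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention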
| 370 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"
    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
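# Usage sketch (requires transformers; the kwargs mirror the defaults restored
# above):
#     config = MobileNetV2Config(depth_multiplier=1.4)
#     assert config.model_type == "mobilenet_v2" and config.image_size == 224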
| 22 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 371 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop the last element instead of the first one
        # 2) add adjacent elements to the stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
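# Quick check (a minimal sketch): every vertex is reachable from "A" in this
# graph, so the explored set equals the full vertex set.
assert depth_first_search(G, "A") == set(G)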
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 22 | 0 |
def solution(limit: int = 1_000_000) -> int:
    # phi[i] starts at i - 1 and is reduced by a sieve over the primes,
    # leaving Euler's totient function for every i.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
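# Sanity check (a minimal sketch): the totients of 2..10 are
# 1, 2, 2, 4, 2, 6, 4, 6, 4, which sum to 31.
assert solution(10) == 31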
if __name__ == "__main__":
print(solution())
| 350 |
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first cur elements, sending the maximum to its final spot
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
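# Quick demo (a minimal sketch): each pass flips the current maximum to the
# front and then down to its final position.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]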
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 22 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __magic_name__ (__lowercase ):
lowerCamelCase__ = ''''''
lowerCamelCase__ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCamelCase__ = None # compression type in fsspec. ex: "gzip"
lowerCamelCase__ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , _a = "" , _a = None , _a = None , **_a ) -> str:
super().__init__(self , **_a )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCAmelCase_ = fsspec.open(
_a , mode="rb" , protocol=_a , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCAmelCase_ = os.path.basename(self.file.path.split("::" )[0] )
lowerCAmelCase_ = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
lowerCAmelCase_ = None
@classmethod
def __a ( cls , _a ) -> Optional[Any]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_a ).lstrip("/" )
def __a ( self ) -> Tuple:
if self.dir_cache is None:
lowerCAmelCase_ = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
lowerCAmelCase_ = {f["name"]: f}
def __a ( self , _a ) -> List[str]:
return self.file.open().read()
def __a ( self , _a , _a = "rb" , _a=None , _a=True , _a=None , **_a , ) -> str:
lowerCAmelCase_ = self._strip_protocol(_a )
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''bz2'''
lowerCamelCase__ = '''bz2'''
lowerCamelCase__ = '''.bz2'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''gzip'''
lowerCamelCase__ = '''gzip'''
lowerCamelCase__ = '''.gz'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''lz4'''
lowerCamelCase__ = '''lz4'''
lowerCamelCase__ = '''.lz4'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''xz'''
lowerCamelCase__ = '''xz'''
lowerCamelCase__ = '''.xz'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''zstd'''
lowerCamelCase__ = '''zstd'''
lowerCamelCase__ = '''.zst'''
def __init__( self , _a , _a = "rb" , _a = None , _a = None , _a = DEFAULT_BLOCK_SIZE , **_a , ) -> Optional[Any]:
super().__init__(
fo=_a , mode=_a , target_protocol=_a , target_options=_a , block_size=_a , **_a , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCAmelCase_ = self.file.__enter__
class __magic_name__ :
def __init__( self , _a ) -> int:
lowerCAmelCase_ = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self , *_a , **_a ) -> Dict:
self._file.__exit__(*_a , **_a )
def __iter__( self ) -> Optional[Any]:
return iter(self._file )
def __a ( self ) -> Optional[int]:
return next(self._file )
def __getattr__( self , _a ) -> Union[str, Any]:
return getattr(self._file , _a )
def fixed_enter(*_a , **_a ):
return WrappedFile(_enter(*_a , **_a ) )
lowerCAmelCase_ = fixed_enter
| 351 |
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    # strip all punctuation and newlines, then tokenize on spaces
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str):
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def A(__a: int , __a: int , __a: List[Any]=False ):
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
return round(logaa(n / df ) , 3 )
def A(__a: int , __a: int ):
return round(tf * idf , 3 )
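# Hedged worked example (added for illustration): mirrors the three steps above
# (upstream names: term_frequency, document_frequency, inverse_document_frequency,
# tf_idf) on a toy two-document corpus, computed inline so it runs standalone.
if __name__ == "__main__":
    from math import log10

    tf = 2  # "be" occurs twice in the document "to be or not to be"
    df, n = 1, 2  # "be" appears in 1 of the 2 corpus documents
    idf = round(log10(n / df), 3)  # 0.301
    print(round(tf * idf, 3))  # 0.602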
| 22 | 0 |
def A(__a: str , __a: str ):
lowerCAmelCase_ = len(__a )
lowerCAmelCase_ = []
for i in range(len(__a ) - pat_len + 1 ):
lowerCAmelCase_ = True
for j in range(__a ):
if s[i + j] != pattern[j]:
lowerCAmelCase_ = False
break
if match_found:
position.append(__a )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 352 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
lowerCamelCase__ = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def A(__a: str , __a: List[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
return (preds == labels).mean()
def A(__a: Any , __a: Any ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = simple_accuracy(__a , __a )
lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def A(__a: List[str] , __a: Optional[int] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = pearsonr(__a , __a )[0]
lowerCAmelCase_ = spearmanr(__a , __a )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def A(__a: Union[str, Any] , __a: Any , __a: str ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(__a , __a )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "mrpc":
return acc_and_fa(__a , __a )
elif task_name == "sts-b":
return pearson_and_spearman(__a , __a )
elif task_name == "qqp":
return acc_and_fa(__a , __a )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__a , __a )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__a , __a )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "rte":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "hans":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
if len(__a ) != len(__a ):
raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
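# Hedged, self-contained sketch (added for illustration): the mangled function
# names above shadow each other, so this recomputes the "acc_and_f1" combination
# directly on toy arrays. Requires numpy and scikit-learn.
if __name__ == "__main__":
    import numpy as np
    from sklearn.metrics import f1_score

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    acc = (preds == labels).mean()  # 0.75
    f1 = f1_score(y_true=labels, y_pred=preds)  # 0.8
    print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})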
| 22 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A(__a: Tuple , __a: Union[str, Any] ):
lowerCAmelCase_ = checkpoint
lowerCAmelCase_ = {}
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["quant_conv.bias"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a )
}
for i in range(__a ):
lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
for i in range(__a ):
lowerCAmelCase_ = num_up_blocks - 1 - i
lowerCAmelCase_ = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
return new_checkpoint
def A(__a: str , __a: str , ):
    # Only supports the v1 inference config
    lowerCAmelCase_ = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
lowerCAmelCase_ = io.BytesIO(r.content )
lowerCAmelCase_ = OmegaConf.load(__a )
lowerCAmelCase_ = 512
lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
lowerCAmelCase_ = {}
with safe_open(__a , framework="pt" , device="cpu" ) as f:
for key in f.keys():
lowerCAmelCase_ = f.get_tensor(__a )
else:
lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"]
# Convert the VAE model.
lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a )
lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a )
lowerCAmelCase_ = AutoencoderKL(**__a )
vae.load_state_dict(__a )
vae.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
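    # Hedged CLI sketch (the script name and paths below are hypothetical):
    #   python convert_vae_pt_to_diffusers.py \
    #       --vae_pt_path ./vae.pt --dump_path ./vae-diffusers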
| 353 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ (__lowercase ):
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViTImageProcessor'''
lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , _a=None , _a=None , **_a ) -> Tuple:
lowerCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _a , )
lowerCAmelCase_ = kwargs.pop("feature_extractor" )
lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_a , _a )
def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict:
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a )
if visual_prompt is not None:
lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a )
if images is not None:
lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a )
if visual_prompt is not None and images is not None:
lowerCAmelCase_ = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCAmelCase_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCAmelCase_ = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def __a ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.batch_decode(*_a , **_a )
def __a ( self , *_a , **_a ) -> Optional[int]:
return self.tokenizer.decode(*_a , **_a )
@property
def __a ( self ) -> List[str]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , )
return self.image_processor_class
@property
def __a ( self ) -> Optional[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , )
return self.image_processor
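# Hedged usage sketch (added for illustration): upstream this processor is
# transformers.CLIPSegProcessor, which pairs a ViT image processor with a CLIP
# tokenizer as above. Downloading the checkpoint requires network access.
if __name__ == "__main__":
    from PIL import Image

    from transformers import CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.new("RGB", (352, 352))
    inputs = processor(text=["a cat"], images=image, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 352, 352])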
| 22 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = '''pytorch_model.bin'''
@dataclasses.dataclass
class __magic_name__ :
lowerCamelCase__ = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
lowerCamelCase__ = dataclasses.field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class __magic_name__ :
lowerCamelCase__ = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
lowerCamelCase__ = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
lowerCamelCase__ = dataclasses.field(
default=__lowercase , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
lowerCamelCase__ = dataclasses.field(
default=__lowercase , metadata={'''help''': '''The name of the task to train on.'''} , )
lowerCamelCase__ = dataclasses.field(
default=__lowercase , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class __magic_name__ :
lowerCamelCase__ = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
lowerCamelCase__ = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
lowerCamelCase__ = dataclasses.field(
default='''no''' , metadata={
            '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'''
} , )
lowerCamelCase__ = dataclasses.field(
default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
lowerCamelCase__ = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
lowerCamelCase__ = dataclasses.field(
default=__lowercase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
lowerCamelCase__ = dataclasses.field(
default=__lowercase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
lowerCamelCase__ = dataclasses.field(
default=__lowercase , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
lowerCamelCase__ = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
lowerCamelCase__ = dataclasses.field(
        default=100 , metadata={'''help''': '''Maximum number of self-training iterations to run.'''} , )
lowerCamelCase__ = dataclasses.field(
default=__lowercase , metadata={'''help''': '''Random seed for initialization.'''} , )
def A(__a: Optional[int] , __a: Tuple , __a: Any , __a: Tuple , __a: int , __a: Any ):
lowerCAmelCase_ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
lowerCAmelCase_ = dataset.filter(lambda __a : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
lowerCAmelCase_ = int(eval_result * len(__a ) )
print(__a )
lowerCAmelCase_ = dataset.sort("probability" , reverse=__a )
lowerCAmelCase_ = dataset.select(range(__a ) )
lowerCAmelCase_ = dataset.remove_columns(["label", "probability"] )
lowerCAmelCase_ = dataset.rename_column("prediction" , "label" )
lowerCAmelCase_ = dataset.map(lambda __a : {"label": idalabel[example["label"]]} )
lowerCAmelCase_ = dataset.shuffle(seed=args.seed )
lowerCAmelCase_ = os.path.join(__a , F"train_pseudo.{args.data_file_extension}" )
if args.data_file_extension == "csv":
dataset.to_csv(__a , index=__a )
else:
dataset.to_json(__a )
def A(__a: List[Any] , __a: str , __a: Dict , __a: str , **__a: Union[str, Any] ):
lowerCAmelCase_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowerCAmelCase_ = STModelArguments(model_name_or_path=__a )
lowerCAmelCase_ = STDataArguments(train_file=__a , infer_file=__a )
lowerCAmelCase_ = STTrainingArguments(output_dir=__a )
lowerCAmelCase_ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__a ).items():
setattr(__a , __a , __a )
for key, value in kwargs.items():
if hasattr(__a , __a ):
setattr(__a , __a , __a )
# Sanity checks
lowerCAmelCase_ = {}
lowerCAmelCase_ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowerCAmelCase_ = args.train_file
lowerCAmelCase_ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowerCAmelCase_ = args.eval_file
for key in data_files:
lowerCAmelCase_ = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file."
if args.data_file_extension is None:
lowerCAmelCase_ = extension
else:
            assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file."
assert (
args.eval_metric in datasets.list_metrics()
), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
lowerCAmelCase_ = F"{args.output_dir}/self-train_iter-{{}}".format
lowerCAmelCase_ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__a )
os.makedirs(__a , exist_ok=__a )
accelerator.wait_for_everyone()
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = 0
lowerCAmelCase_ = False
# Show the progress bar
lowerCAmelCase_ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
lowerCAmelCase_ = data_dir_format(__a )
assert os.path.exists(__a )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowerCAmelCase_ = os.path.join(__a , "stage-1" )
lowerCAmelCase_ = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__a , __a ):
arguments_dict.update({key: value} )
lowerCAmelCase_ = os.path.join(__a , "best-checkpoint" , __a )
if os.path.exists(__a ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , __a , __a , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , __a )
finetune(**__a )
accelerator.wait_for_everyone()
assert os.path.exists(__a )
logger.info("Self-training job completed: iteration: %d, stage: 1." , __a )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowerCAmelCase_ = os.path.join(__a , "best-checkpoint" )
lowerCAmelCase_ = os.path.join(__a , "stage-2" )
# Update arguments_dict
lowerCAmelCase_ = model_path
lowerCAmelCase_ = data_files["train"]
lowerCAmelCase_ = current_output_dir
lowerCAmelCase_ = os.path.join(__a , "best-checkpoint" , __a )
if os.path.exists(__a ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , __a , __a , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , __a )
finetune(**__a )
accelerator.wait_for_everyone()
assert os.path.exists(__a )
logger.info("Self-training job completed: iteration: %d, stage: 2." , __a )
lowerCAmelCase_ = iteration
lowerCAmelCase_ = data_dir_format(iteration + 1 )
lowerCAmelCase_ = AutoConfig.from_pretrained(os.path.join(__a , "best-checkpoint" ) )
lowerCAmelCase_ = config.idalabel
lowerCAmelCase_ = os.path.join(__a , "eval_results_best-checkpoint.json" )
lowerCAmelCase_ = os.path.join(__a , "test_results_best-checkpoint.json" )
assert os.path.exists(__a )
with open(__a , "r" ) as f:
lowerCAmelCase_ = float(json.load(__a )[args.eval_metric] )
lowerCAmelCase_ = os.path.join(__a , "infer_output_best-checkpoint.csv" )
assert os.path.exists(__a )
# Loading the dataset from local csv or json files.
lowerCAmelCase_ = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
lowerCAmelCase_ = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(__a , exist_ok=__a )
shutil.copy(__a , os.path.join(__a , F"eval_results_iter-{iteration}.json" ) )
if os.path.exists(__a ):
shutil.copy(__a , os.path.join(__a , F"test_results_iter-{iteration}.json" ) )
create_pseudo_labeled_data(__a , __a , __a , __a , __a , __a )
accelerator.wait_for_everyone()
lowerCAmelCase_ = os.path.join(__a , F"train_pseudo.{args.data_file_extension}" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
lowerCAmelCase_ = eval_result
if best_iteration is None:
lowerCAmelCase_ = new_iteration
lowerCAmelCase_ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
lowerCAmelCase_ = new_iteration
lowerCAmelCase_ = new_eval_result
lowerCAmelCase_ = 0
else:
if new_eval_result == best_eval_result:
lowerCAmelCase_ = new_iteration
lowerCAmelCase_ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
lowerCAmelCase_ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , __a )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__a , F"eval_results_iter-{iteration}.json" ) , os.path.join(__a , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__a , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(__a , "eval_results_best-iteration.json" ) , )
| 354 |
import datasets
lowerCamelCase__ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
lowerCamelCase__ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
lowerCamelCase__ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def A(__a: Dict , __a: Union[str, Any] ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
def __a ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def __a ( self , _a , _a ) -> List[str]:
return {"accuracy": simple_accuracy(_a , _a )}
| 22 | 0 |
from ..utils import DummyObject, requires_backends
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''note_seq''']
def __init__( self , *_a , **_a ) -> Optional[Any]:
requires_backends(self , ["note_seq"] )
@classmethod
def __a ( cls , *_a , **_a ) -> List[str]:
requires_backends(cls , ["note_seq"] )
@classmethod
def __a ( cls , *_a , **_a ) -> List[Any]:
requires_backends(cls , ["note_seq"] )
| 355 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowerCamelCase__ = '''bert-base-cased'''
lowerCamelCase__ = '''google/pegasus-xsum'''
lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random'''
lowerCamelCase__ = '''sshleifer/bart-tiny-random'''
lowerCamelCase__ = '''sshleifer/tiny-mbart'''
lowerCamelCase__ = '''sshleifer/tiny-marian-en-de'''
def A(__a: Path , __a: list ):
lowerCAmelCase_ = "\n".join(__a )
Path(__a ).open("w" ).writelines(__a )
def A(__a: str ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__a , F"{split}.source" ) , __a )
_dump_articles(os.path.join(__a , F"{split}.target" ) , __a )
return tmp_dir
class __magic_name__ (__lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __a ( self , _a ) -> Dict:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_a , _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __a ( self , _a ) -> str:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = LegacySeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines()
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
lowerCAmelCase_ = {x.name for x in save_dir.iterdir()}
lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __a ( self ) -> Any:
if not FAIRSEQ_AVAILABLE:
return
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 )
lowerCAmelCase_ = 64
lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a )
lowerCAmelCase_ = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for batch in data_loader:
lowerCAmelCase_ = batch["input_ids"].shape
lowerCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            lowerCAmelCase_ = np.prod(batch["input_ids"].shape )  # np.product was removed in NumPy 2.0
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f"too many tokens in {len(_a )} batches" )
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 )
lowerCAmelCase_ = 2
lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
lowerCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_a , _a="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def __a ( self , _a=1000 , _a=128 ) -> str:
if os.getenv("USE_REAL_DATA" , _a ):
lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro"
lowerCAmelCase_ = max_len * 2 * 64
if not Path(_a ).joinpath("train.len" ).exists():
save_len_file(_a , _a )
else:
lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro"
lowerCAmelCase_ = max_len * 4
save_len_file(_a , _a )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset()
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
| 22 | 0 |
from __future__ import annotations
def A(__a: dict , __a: str ):
lowerCAmelCase_ , lowerCAmelCase_ = set(__a ), [start]
while stack:
lowerCAmelCase_ = stack.pop()
explored.add(__a )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__a )
return explored
lowerCamelCase__ = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 356 |
def A(__a: Optional[Any] ):
lowerCAmelCase_ = len(__a )
lowerCAmelCase_ = sum(__a )
lowerCAmelCase_ = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
lowerCAmelCase_ = True
for i in range(1 , s + 1 ):
lowerCAmelCase_ = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
lowerCAmelCase_ = dp[i][j - 1]
if arr[i - 1] <= j:
lowerCAmelCase_ = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
lowerCAmelCase_ = s - 2 * j
break
return diff
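# Hedged worked example (added for illustration; the upstream helper is usually
# named find_min): for [1, 6, 11, 5] the total is 23, the best split is
# {1, 5, 6} vs {11}, and the returned minimum difference is 12 - 11 = 1.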
| 22 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def A(__a: int , __a: Any=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def A(__a: int , __a: Tuple=0 ):
lowerCAmelCase_ = []
for old_item in old_list:
lowerCAmelCase_ = old_item.replace("in_layers.0" , "norm1" )
lowerCAmelCase_ = new_item.replace("in_layers.2" , "conv1" )
lowerCAmelCase_ = new_item.replace("out_layers.0" , "norm2" )
lowerCAmelCase_ = new_item.replace("out_layers.3" , "conv2" )
lowerCAmelCase_ = new_item.replace("emb_layers.1" , "time_emb_proj" )
lowerCAmelCase_ = new_item.replace("skip_connection" , "conv_shortcut" )
lowerCAmelCase_ = shave_segments(__a , n_shave_prefix_segments=__a )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def A(__a: int , __a: Union[str, Any]=0 ):
lowerCAmelCase_ = []
for old_item in old_list:
lowerCAmelCase_ = old_item
lowerCAmelCase_ = new_item.replace("norm.weight" , "group_norm.weight" )
lowerCAmelCase_ = new_item.replace("norm.bias" , "group_norm.bias" )
lowerCAmelCase_ = new_item.replace("proj_out.weight" , "proj_attn.weight" )
lowerCAmelCase_ = new_item.replace("proj_out.bias" , "proj_attn.bias" )
lowerCAmelCase_ = shave_segments(__a , n_shave_prefix_segments=__a )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def A(__a: Optional[int] , __a: Any , __a: str , __a: List[Any]=None , __a: List[Any]=None , __a: Union[str, Any]=None ):
assert isinstance(__a , __a ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowerCAmelCase_ = old_checkpoint[path]
lowerCAmelCase_ = old_tensor.shape[0] // 3
lowerCAmelCase_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowerCAmelCase_ = old_tensor.shape[0] // config["num_head_channels"] // 3
lowerCAmelCase_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = old_tensor.split(channels // num_heads , dim=1 )
lowerCAmelCase_ = query.reshape(__a )
lowerCAmelCase_ = key.reshape(__a )
lowerCAmelCase_ = value.reshape(__a )
for path in paths:
lowerCAmelCase_ = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowerCAmelCase_ = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
lowerCAmelCase_ = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
lowerCAmelCase_ = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowerCAmelCase_ = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowerCAmelCase_ = old_checkpoint[path["old"]][:, :, 0]
else:
lowerCAmelCase_ = old_checkpoint[path["old"]]
def A(__a: Tuple , __a: str ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = checkpoint["time_embed.0.weight"]
lowerCAmelCase_ = checkpoint["time_embed.0.bias"]
lowerCAmelCase_ = checkpoint["time_embed.2.weight"]
lowerCAmelCase_ = checkpoint["time_embed.2.bias"]
lowerCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
lowerCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
lowerCAmelCase_ = checkpoint["out.0.weight"]
lowerCAmelCase_ = checkpoint["out.0.bias"]
lowerCAmelCase_ = checkpoint["out.2.weight"]
lowerCAmelCase_ = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the middle blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the output blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(__a )
}
for i in range(1 , __a ):
lowerCAmelCase_ = (i - 1) // (config["num_res_blocks"] + 1)
lowerCAmelCase_ = (i - 1) % (config["num_res_blocks"] + 1)
lowerCAmelCase_ = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
lowerCAmelCase_ = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
lowerCAmelCase_ = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
lowerCAmelCase_ = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
lowerCAmelCase_ = renew_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"input_blocks.{i}.0", "new": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
lowerCAmelCase_ = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path, resnet_op] , config=__a )
if len(__a ):
lowerCAmelCase_ = renew_attention_paths(__a )
lowerCAmelCase_ = {
"old": F"input_blocks.{i}.1",
"new": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
lowerCAmelCase_ = {
F"input_blocks.{i}.1.qkv.bias": {
"key": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"query": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"value": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"key": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"query": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"value": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=__a , config=__a , )
lowerCAmelCase_ = middle_blocks[0]
lowerCAmelCase_ = middle_blocks[1]
lowerCAmelCase_ = middle_blocks[2]
lowerCAmelCase_ = renew_resnet_paths(__a )
assign_to_checkpoint(__a , __a , __a , config=__a )
lowerCAmelCase_ = renew_resnet_paths(__a )
assign_to_checkpoint(__a , __a , __a , config=__a )
lowerCAmelCase_ = renew_attention_paths(__a )
lowerCAmelCase_ = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
__a , __a , __a , attention_paths_to_split=__a , config=__a )
for i in range(__a ):
lowerCAmelCase_ = i // (config["num_res_blocks"] + 1)
lowerCAmelCase_ = i % (config["num_res_blocks"] + 1)
lowerCAmelCase_ = [shave_segments(__a , 2 ) for name in output_blocks[i]]
lowerCAmelCase_ = {}
for layer in output_block_layers:
lowerCAmelCase_ , lowerCAmelCase_ = layer.split("." )[0], shave_segments(__a , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__a )
else:
lowerCAmelCase_ = [layer_name]
if len(__a ) > 1:
lowerCAmelCase_ = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
lowerCAmelCase_ = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
lowerCAmelCase_ = renew_resnet_paths(__a )
lowerCAmelCase_ = renew_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"output_blocks.{i}.0", "new": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowerCAmelCase_ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
lowerCAmelCase_ = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
lowerCAmelCase_ = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(__a ) == 2:
lowerCAmelCase_ = []
if len(__a ):
lowerCAmelCase_ = renew_attention_paths(__a )
lowerCAmelCase_ = {
"old": F"output_blocks.{i}.1",
"new": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
lowerCAmelCase_ = {
F"output_blocks.{i}.1.qkv.bias": {
"key": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"query": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"value": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"key": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"query": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"value": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=__a , )
else:
lowerCAmelCase_ = renew_resnet_paths(__a , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCAmelCase_ = ".".join(["output_blocks", str(__a ), path["old"]] )
lowerCAmelCase_ = ".".join(["up_blocks", str(__a ), "resnets", str(__a ), path["new"]] )
lowerCAmelCase_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
lowerCamelCase__ = json.loads(f.read())
lowerCamelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
lowerCamelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
lowerCamelCase__ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
lowerCamelCase__ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
lowerCamelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
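    # Hedged CLI sketch (the script name and paths below are hypothetical):
    #   python convert_ldm_original_checkpoint_to_diffusers.py \
    #       --checkpoint_path ./model.ckpt --config_file ./config.json --dump_path ./ldm-diffusers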
| 357 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def A(__a: Any , __a: Union[str, Any] , __a: List[str] ):
lowerCAmelCase_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCAmelCase_ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase_ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(__a , exist_ok=__a )
lowerCAmelCase_ = os.path.join(__a , "README.md" )
print(F"Generating {path}" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(__a )
# make sure we are under the root of the project
lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''')
lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''Speech2TextFeatureExtractor'''
lowerCamelCase__ = '''Speech2TextTokenizer'''
def __init__( self , _a , _a ) -> List[str]:
super().__init__(_a , _a )
lowerCAmelCase_ = self.feature_extractor
lowerCAmelCase_ = False
def __call__( self , *_a , **_a ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowerCAmelCase_ = kwargs.pop("raw_speech" )
else:
lowerCAmelCase_ = kwargs.pop("audio" , _a )
lowerCAmelCase_ = kwargs.pop("sampling_rate" , _a )
lowerCAmelCase_ = kwargs.pop("text" , _a )
if len(_a ) > 0:
lowerCAmelCase_ = args[0]
lowerCAmelCase_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowerCAmelCase_ = self.feature_extractor(_a , *_a , sampling_rate=_a , **_a )
if text is not None:
lowerCAmelCase_ = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase_ = encodings["input_ids"]
return inputs
def __a ( self , *_a , **_a ) -> Any:
return self.tokenizer.batch_decode(*_a , **_a )
def __a ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def __a ( self ) -> Optional[int]:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer
yield
lowerCAmelCase_ = self.feature_extractor
lowerCAmelCase_ = False
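# Hedged usage sketch (added for illustration): upstream this class is
# transformers.Speech2TextProcessor. One second of silence serves as dummy
# audio; downloading the checkpoint requires network access.
if __name__ == "__main__":
    import numpy as np

    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    inputs = processor(audio=np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="pt")
    print(inputs["input_features"].shape)  # (batch, frames, 80) log-mel features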
| 358 |
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def A(__a: str ):
re.sub("<n>" , "" , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) )
| 22 | 0 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCamelCase__ = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def A(__a: Any ):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
lowerCAmelCase_ = list(s_dict.keys() )
for key in keys:
lowerCAmelCase_ = r".*/layers_(\d+)"
lowerCAmelCase_ = key
if re.match(__a , __a ):
lowerCAmelCase_ = re.sub(r"layers_(\d+)" , r"block/\1/layer" , __a )
lowerCAmelCase_ = r"(encoder|decoder)\/"
if re.match(__a , __a ):
lowerCAmelCase_ = re.match(__a , __a ).groups()
if groups[0] == "encoder":
lowerCAmelCase_ = re.sub(r"/mlp/" , r"/1/mlp/" , __a )
lowerCAmelCase_ = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , __a )
elif groups[0] == "decoder":
lowerCAmelCase_ = re.sub(r"/mlp/" , r"/2/mlp/" , __a )
lowerCAmelCase_ = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , __a )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCAmelCase_ = new_key.replace(__a , __a )
print(F"{key} -> {new_key}" )
lowerCAmelCase_ = s_dict.pop(__a )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCAmelCase_ = s_dict[key].shape[0]
lowerCAmelCase_ = s_dict[key]
for idx in range(__a ):
lowerCAmelCase_ = expert_weihts[idx]
print(F"{key} -> {key.replace('expert/' , 'nested fstring' )}" )
s_dict.pop(__a )
return s_dict
lowerCamelCase__ = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
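# gin-file hyperparameter names mapped onto SwitchTransformersConfig keyword arguments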
def A(__a: Optional[int] , __a: Union[str, Any] ):
    # Convert a google style config to the hugging face format
import regex as re
with open(__a , "r" ) as f:
lowerCAmelCase_ = f.read()
lowerCAmelCase_ = re.findall(r"(.*) = ([0-9.]*)" , __a )
lowerCAmelCase_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCAmelCase_ = float(__a ) if "." in value else int(__a )
lowerCAmelCase_ = re.findall(r"(.*activations) = \(\'(.*)\',\)" , __a )[0]
lowerCAmelCase_ = str(activation[1] )
lowerCAmelCase_ = num_experts
lowerCAmelCase_ = SwitchTransformersConfig(**__a )
return config
def A(__a: List[Any] , __a: int , __a: Union[str, Any]=None , __a: Union[str, Any]="./" , __a: Dict=8 ):
# Initialise PyTorch model
print(F"Loading flax weights from : {flax_checkpoint_path}" )
    lowerCAmelCase_ = checkpoints.load_t5x_checkpoint(__a )
if gin_file is not None:
lowerCAmelCase_ = convert_gin_to_config(__a , __a )
else:
lowerCAmelCase_ = SwitchTransformersConfig.from_pretrained(__a )
lowerCAmelCase_ = SwitchTransformersForConditionalGeneration(__a )
lowerCAmelCase_ = flax_params["target"]
lowerCAmelCase_ = flatten_dict(__a , sep="/" )
lowerCAmelCase_ = rename_keys(__a )
lowerCAmelCase_ = unflatten_dict(__a , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__a , __a )
print(F"Save PyTorch model to {pytorch_dump_path}" )
pt_model.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
        help=(
            '''Path to the pretrained SwitchTransformers T5X checkpoint to convert. \nThe model architecture is'''
            ''' read from `--config_name` if provided; otherwise a `gin_file` has to be provided.'''
        ),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
lowerCamelCase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''trocr'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , _a=50265 , _a=1024 , _a=12 , _a=16 , _a=4096 , _a="gelu" , _a=512 , _a=0.1 , _a=0.0 , _a=0.0 , _a=2 , _a=0.0_2 , _a=0.0 , _a=True , _a=False , _a=True , _a=True , _a=1 , _a=0 , _a=2 , **_a , ) -> Dict:
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = d_model
lowerCAmelCase_ = decoder_layers
lowerCAmelCase_ = decoder_attention_heads
lowerCAmelCase_ = decoder_ffn_dim
lowerCAmelCase_ = activation_function
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = init_std
lowerCAmelCase_ = decoder_layerdrop
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = scale_embedding
lowerCAmelCase_ = use_learned_position_embeddings
lowerCAmelCase_ = layernorm_embedding
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , **_a , )
| 360 |
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''bertabs'''
def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]:
super().__init__(**_a )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_pos
lowerCAmelCase_ = enc_layers
lowerCAmelCase_ = enc_hidden_size
lowerCAmelCase_ = enc_heads
lowerCAmelCase_ = enc_ff_size
lowerCAmelCase_ = enc_dropout
lowerCAmelCase_ = dec_layers
lowerCAmelCase_ = dec_hidden_size
lowerCAmelCase_ = dec_heads
lowerCAmelCase_ = dec_ff_size
lowerCAmelCase_ = dec_dropout
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 361 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A(__a: Tuple , __a: Union[str, Any] ):
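    # Map an LDM VAE state dict onto diffusers' AutoencoderKL layout: copy the top-level
    # conv/norm tensors verbatim, then rename and regroup the down/mid/up blocks.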
lowerCAmelCase_ = checkpoint
lowerCAmelCase_ = {}
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["quant_conv.bias"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a )
}
for i in range(__a ):
lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
for i in range(__a ):
lowerCAmelCase_ = num_up_blocks - 1 - i
lowerCAmelCase_ = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
return new_checkpoint
def A(__a: str , __a: str , ):
# Only support V1
    lowerCAmelCase_ = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
lowerCAmelCase_ = io.BytesIO(r.content )
lowerCAmelCase_ = OmegaConf.load(__a )
lowerCAmelCase_ = 512
lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
lowerCAmelCase_ = {}
with safe_open(__a , framework="pt" , device="cpu" ) as f:
for key in f.keys():
lowerCAmelCase_ = f.get_tensor(__a )
else:
lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"]
# Convert the VAE model.
lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a )
lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a )
lowerCAmelCase_ = AutoencoderKL(**__a )
vae.load_state_dict(__a )
vae.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 22 | 0 |
from __future__ import annotations
def A(__a: str , __a: list[str] | None = None ):
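    # Tabulation DP: table[i] collects every word sequence that concatenates to target[:i],
    # so table[len(target)] holds all ways to build the full target from word_bank.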
lowerCAmelCase_ = word_bank or []
# create a table
lowerCAmelCase_ = len(__a ) + 1
lowerCAmelCase_ = []
for _ in range(__a ):
table.append([] )
# seed value
lowerCAmelCase_ = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(__a ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(__a )] == word:
lowerCAmelCase_ = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(__a )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(__a )]:
combination.reverse()
return table[len(__a )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 362 |
def A():
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCamelCase__ = generate_large_matrix()
lowerCamelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def A(__a: list[list[int]] ):
assert all(row == sorted(__a , reverse=__a ) for row in grid )
assert all(list(__a ) == sorted(__a , reverse=__a ) for col in zip(*__a ) )
def A(__a: list[int] ):
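    # Binary search for the index of the first negative value in a non-increasing row;
    # returns len(array) when every value is non-negative.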
lowerCAmelCase_ = 0
lowerCAmelCase_ = len(__a ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowerCAmelCase_ = (left + right) // 2
lowerCAmelCase_ = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowerCAmelCase_ = mid + 1
else:
lowerCAmelCase_ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__a )
def A(__a: list[list[int]] ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = len(grid[0] )
for i in range(len(__a ) ):
lowerCAmelCase_ = find_negative_index(grid[i][:bound] )
total += bound
return (len(__a ) * len(grid[0] )) - total
def A(__a: list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def A(__a: list[list[int]] ):
lowerCAmelCase_ = 0
for row in grid:
for i, number in enumerate(__a ):
if number < 0:
total += len(__a ) - i
break
return total
def A():
from timeit import timeit
print("Running benchmarks" )
lowerCAmelCase_ = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowerCAmelCase_ = timeit(F"{func}(grid=grid)" , setup=__a , number=500 )
print(F"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 22 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
lowerCamelCase__ = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def A(__a: str , __a: List[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
return (preds == labels).mean()
def A(__a: Any , __a: Any ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = simple_accuracy(__a , __a )
    lowerCAmelCase_ = f1_score(y_true=__a , y_pred=__a )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def A(__a: List[str] , __a: Optional[int] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = pearsonr(__a , __a )[0]
lowerCAmelCase_ = spearmanr(__a , __a )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def A(__a: Union[str, Any] , __a: Any , __a: str ):
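    # Dispatch on the GLUE task name: Matthews correlation for CoLA, Pearson/Spearman
    # for STS-B, accuracy+F1 for MRPC and QQP, and plain accuracy for the rest.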
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(__a , __a )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "mrpc":
return acc_and_fa(__a , __a )
elif task_name == "sts-b":
return pearson_and_spearman(__a , __a )
elif task_name == "qqp":
return acc_and_fa(__a , __a )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__a , __a )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__a , __a )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "rte":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "hans":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
if len(__a ) != len(__a ):
raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a ) | 363 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def A(__a: Dict ):
lowerCAmelCase_ = r"\w+[.]\d+"
lowerCAmelCase_ = re.findall(__a , __a )
for pat in pats:
lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) )
return key
def A(__a: str , __a: Tuple , __a: List[Any] ):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A(__a: Dict , __a: Any , __a: List[Any]=42 ):
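    # End-to-end PyTorch -> Flax state-dict conversion: rename keys, transpose conv kernels
    # to HWIO and linear weights, and validate every shape against a randomly initialised model.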
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) )
lowerCAmelCase_ = flatten_dict(__a )
lowerCAmelCase_ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = rename_key(__a )
lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(__a )
return unflatten_dict(__a )
| 22 | 0 |
def A(__a: int ):
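    # Convert an integer to its binary string, e.g. 5 -> "0b101" and -5 -> "-0b101".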
    if isinstance(__a , float ):
raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(__a , str ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
lowerCAmelCase_ = False
if num < 0:
lowerCAmelCase_ = True
lowerCAmelCase_ = -num
lowerCAmelCase_ = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(__a ) for e in binary )
return "0b" + "".join(str(__a ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''deta'''
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _a=None , _a=900 , _a=2048 , _a=6 , _a=2048 , _a=8 , _a=6 , _a=1024 , _a=8 , _a=0.0 , _a=True , _a="relu" , _a=256 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.0_2 , _a=1.0 , _a=True , _a=False , _a="sine" , _a=5 , _a=4 , _a=4 , _a=True , _a=300 , _a=True , _a=True , _a=1 , _a=5 , _a=2 , _a=1 , _a=1 , _a=5 , _a=2 , _a=0.1 , _a=0.2_5 , **_a , ) -> List[Any]:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCAmelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(_a , _a ):
lowerCAmelCase_ = backbone_config.pop("model_type" )
lowerCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ = config_class.from_dict(_a )
lowerCAmelCase_ = backbone_config
lowerCAmelCase_ = num_queries
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = d_model
lowerCAmelCase_ = encoder_ffn_dim
lowerCAmelCase_ = encoder_layers
lowerCAmelCase_ = encoder_attention_heads
lowerCAmelCase_ = decoder_ffn_dim
lowerCAmelCase_ = decoder_layers
lowerCAmelCase_ = decoder_attention_heads
lowerCAmelCase_ = dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = activation_function
lowerCAmelCase_ = init_std
lowerCAmelCase_ = init_xavier_std
lowerCAmelCase_ = encoder_layerdrop
lowerCAmelCase_ = auxiliary_loss
lowerCAmelCase_ = position_embedding_type
# deformable attributes
lowerCAmelCase_ = num_feature_levels
lowerCAmelCase_ = encoder_n_points
lowerCAmelCase_ = decoder_n_points
lowerCAmelCase_ = two_stage
lowerCAmelCase_ = two_stage_num_proposals
lowerCAmelCase_ = with_box_refine
lowerCAmelCase_ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowerCAmelCase_ = class_cost
lowerCAmelCase_ = bbox_cost
lowerCAmelCase_ = giou_cost
# Loss coefficients
lowerCAmelCase_ = mask_loss_coefficient
lowerCAmelCase_ = dice_loss_coefficient
lowerCAmelCase_ = bbox_loss_coefficient
lowerCAmelCase_ = giou_loss_coefficient
lowerCAmelCase_ = eos_coefficient
lowerCAmelCase_ = focal_alpha
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __a ( self ) -> int:
return self.encoder_attention_heads
@property
def __a ( self ) -> int:
return self.d_model
def __a ( self ) -> Tuple:
lowerCAmelCase_ = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ = self.backbone_config.to_dict()
lowerCAmelCase_ = self.__class__.model_type
return output
| 365 |
import math
def A(__a: int ):
return math.sqrt(__a ) * math.sqrt(__a ) == num
def A(__a: int ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = n
while left <= right:
lowerCAmelCase_ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase_ = mid - 1
else:
lowerCAmelCase_ = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''align_text_model'''
def __init__( self , _a=30522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.0_2 , _a=1E-12 , _a=0 , _a="absolute" , _a=True , **_a , ) -> List[str]:
super().__init__(**_a )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = position_embedding_type
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = pad_token_id
@classmethod
def __a ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(_a , **_a )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
lowerCAmelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_a , **_a )
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''align_vision_model'''
def __init__( self , _a = 3 , _a = 600 , _a = 2.0 , _a = 3.1 , _a = 8 , _a = [3, 3, 5, 3, 5, 5, 3] , _a = [32, 16, 24, 40, 80, 112, 192] , _a = [16, 24, 40, 80, 112, 192, 320] , _a = [] , _a = [1, 2, 2, 2, 1, 2, 1] , _a = [1, 2, 2, 3, 3, 4, 1] , _a = [1, 6, 6, 6, 6, 6, 6] , _a = 0.2_5 , _a = "swish" , _a = 2560 , _a = "mean" , _a = 0.0_2 , _a = 0.0_0_1 , _a = 0.9_9 , _a = 0.2 , **_a , ) -> int:
super().__init__(**_a )
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = width_coefficient
lowerCAmelCase_ = depth_coefficient
lowerCAmelCase_ = depth_divisor
lowerCAmelCase_ = kernel_sizes
lowerCAmelCase_ = in_channels
lowerCAmelCase_ = out_channels
lowerCAmelCase_ = depthwise_padding
lowerCAmelCase_ = strides
lowerCAmelCase_ = num_block_repeats
lowerCAmelCase_ = expand_ratios
lowerCAmelCase_ = squeeze_expansion_ratio
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dim
lowerCAmelCase_ = pooling_type
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = batch_norm_eps
lowerCAmelCase_ = batch_norm_momentum
lowerCAmelCase_ = drop_connect_rate
lowerCAmelCase_ = sum(_a ) * 4
@classmethod
def __a ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
lowerCAmelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_a , **_a )
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''align'''
lowerCamelCase__ = True
def __init__( self , _a=None , _a=None , _a=640 , _a=1.0 , _a=0.0_2 , **_a , ) -> Any:
super().__init__(**_a )
if text_config is None:
lowerCAmelCase_ = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
lowerCAmelCase_ = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
lowerCAmelCase_ = AlignTextConfig(**_a )
lowerCAmelCase_ = AlignVisionConfig(**_a )
lowerCAmelCase_ = projection_dim
lowerCAmelCase_ = temperature_init_value
lowerCAmelCase_ = initializer_range
@classmethod
def __a ( cls , _a , _a , **_a ) -> Optional[int]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ = self.text_config.to_dict()
lowerCAmelCase_ = self.vision_config.to_dict()
lowerCAmelCase_ = self.__class__.model_type
return output
| 366 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def A(__a: Dict , __a: List[str]=None ):
require_version(deps[pkg] , __a )
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase__ = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 367 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ''' Hello world! cécé herlolip'''
lowerCamelCase__ = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def A(__a: Any ):
lowerCAmelCase_ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
]
for k in ignore_keys:
state_dict.pop(__a , __a )
def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ):
lowerCAmelCase_ = dct.pop(__a )
lowerCAmelCase_ = val
def A(__a: Tuple ):
lowerCAmelCase_ = torch.load(__a , map_location="cpu" )
lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval()
hub_interface.model.load_state_dict(sd["model"] )
return hub_interface
def A(__a: List[str] ):
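    # Build a bias-free Linear lm_head whose weights are shared with the embedding matrix.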
lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape
lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a )
lowerCAmelCase_ = emb.weight.data
return lin_layer
@torch.no_grad()
def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ):
if not os.path.exists(__a ):
lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval()
else:
lowerCAmelCase_ = load_xsum_checkpoint(__a )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
lowerCAmelCase_ = checkpoint_path.replace("." , "-" )
lowerCAmelCase_ = BartConfig.from_pretrained(__a )
lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 )
lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 )
if not torch.eq(__a , __a ).all():
raise ValueError(
F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" )
if checkpoint_path == "bart.large.mnli":
lowerCAmelCase_ = bart.state_dict()
remove_ignore_keys_(__a )
lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"]
for src, dest in mnli_rename_keys:
rename_key(__a , __a , __a )
lowerCAmelCase_ = BartForSequenceClassification(__a ).eval()
model.load_state_dict(__a )
lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a )
lowerCAmelCase_ = model(__a )[0] # logits
else: # no classification heads to worry about
lowerCAmelCase_ = bart.model.state_dict()
remove_ignore_keys_(__a )
lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"]
lowerCAmelCase_ = bart.extract_features(__a )
if hf_checkpoint_name == "facebook/bart-large":
lowerCAmelCase_ = BartModel(__a ).eval()
model.load_state_dict(__a )
lowerCAmelCase_ = model(__a ).model[0]
else:
lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt
model.model.load_state_dict(__a )
if hasattr(__a , "lm_head" ):
lowerCAmelCase_ = make_linear_from_emb(model.model.shared )
lowerCAmelCase_ = model.model(__a )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
lowerCamelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 22 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class __magic_name__ (tf.keras.layers.Layer ):
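    # In-graph GPT-2 tokenizer: wraps keras-nlp's BytePairTokenizer so that tokenization
    # and padding can run inside a TF graph or SavedModel using the same vocab and merges.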
def __init__( self , _a , _a , _a = None , _a = None ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = max_length
lowerCAmelCase_ = vocab
lowerCAmelCase_ = merges
lowerCAmelCase_ = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def __a ( cls , _a , *_a , **_a ) -> List[Any]:
lowerCAmelCase_ = [" ".join(_a ) for m in tokenizer.bpe_ranks.keys()]
lowerCAmelCase_ = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def __a ( cls , _a , *_a , **_a ) -> List[str]:
        lowerCAmelCase_ = GPT2Tokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def __a ( cls , _a ) -> Tuple:
return cls(**_a )
def __a ( self ) -> List[str]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __a ( self , _a , _a = None ) -> List[str]:
lowerCAmelCase_ = self.tf_tokenizer(_a )
lowerCAmelCase_ = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCAmelCase_ = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCAmelCase_ , lowerCAmelCase_ = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 368 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = MobileBertTokenizer
lowerCamelCase__ = MobileBertTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = filter_non_english
lowerCamelCase__ = '''google/mobilebert-uncased'''
def __a ( self ) -> Optional[Any]:
super().setUp()
lowerCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowerCAmelCase_ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __a ( self , _a ) -> Any:
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = "unwanted, running"
return input_text, output_text
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __a ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ = {}
for i, token in enumerate(_a ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Dict:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False
lowerCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = ["的", "人", "有"]
lowerCAmelCase_ = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 22 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def A(__a: Dict , __a: Optional[Any] , __a: Union[str, Any] ):
# Initialise PyTorch model
lowerCAmelCase_ = MobileBertConfig.from_json_file(__a )
print(F"Building PyTorch model from configuration: {config}" )
lowerCAmelCase_ = MobileBertForPreTraining(__a )
# Load weights from tf checkpoint
lowerCAmelCase_ = load_tf_weights_in_mobilebert(__a , __a , __a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 369 |
import math
from collections.abc import Iterator
from itertools import takewhile
def A(__a: int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A():
lowerCAmelCase_ = 2
while True:
if is_prime(__a ):
yield num
num += 1
def A(__a: int = 200_0000 ):
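    # Project Euler problem 10: sum of all primes strictly below `n`.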
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 22 | 0 |
lowerCamelCase__ = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
lowerCamelCase__ = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def A(__a: float , __a: str , __a: str ):
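    # Normalise the unit names (lowercase, drop a plural "s", map long names to symbols),
    # then convert via the difference of the base-10 exponents.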
lowerCAmelCase_ = from_type.lower().strip("s" )
lowerCAmelCase_ = to_type.lower().strip("s" )
lowerCAmelCase_ = UNIT_SYMBOL.get(__a , __a )
lowerCAmelCase_ = UNIT_SYMBOL.get(__a , __a )
if from_sanitized not in METRIC_CONVERSION:
lowerCAmelCase_ = (
F"Invalid 'from_type' value: {from_type!r}.\n"
F"Conversion abbreviations are: {', '.join(__a )}"
)
raise ValueError(__a )
if to_sanitized not in METRIC_CONVERSION:
lowerCAmelCase_ = (
F"Invalid 'to_type' value: {to_type!r}.\n"
F"Conversion abbreviations are: {', '.join(__a )}"
)
raise ValueError(__a )
lowerCAmelCase_ = METRIC_CONVERSION[from_sanitized]
lowerCAmelCase_ = METRIC_CONVERSION[to_sanitized]
lowerCAmelCase_ = 1
if from_exponent > to_exponent:
lowerCAmelCase_ = from_exponent - to_exponent
else:
lowerCAmelCase_ = -(to_exponent - from_exponent)
return value * pow(10 , __a )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 370 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''mobilenet_v2'''
def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict:
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = depth_multiplier
lowerCAmelCase_ = depth_divisible_by
lowerCAmelCase_ = min_depth
lowerCAmelCase_ = expand_ratio
lowerCAmelCase_ = output_stride
lowerCAmelCase_ = first_layer_is_expansion
lowerCAmelCase_ = finegrained_output
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = tf_padding
lowerCAmelCase_ = classifier_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = semantic_loss_ignore_index
class __magic_name__ (__lowercase ):
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def __a ( self ) -> float:
return 1E-4
| 22 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def A(__a: Any , __a: Union[str, Any] , __a: List[str] ):
lowerCAmelCase_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCAmelCase_ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase_ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(__a , exist_ok=__a )
lowerCAmelCase_ = os.path.join(__a , "README.md" )
print(F"Generating {path}" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(__a )
# make sure we are under the root of the project
lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''')
lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 371 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search over an adjacency-list graph; returns the
    set of vertices reachable from `start`."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
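# For contrast with the "Differences from BFS" notes above, a minimal
# breadth-first counterpart (a sketch added for illustration, not part of the
# original module): pop from the front of a queue and mark vertices as soon
# as they are enqueued.
from collections import deque


def breadth_first_search(graph: dict, start: str) -> set[str]:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()  # 1) pop first element instead of last
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)  # 2) mark on enqueue, before visiting
                queue.append(adj)
    return explored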
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 22 | 0 |
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            titles = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(titles) + "\n")
if __name__ == "__main__":
main()
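# Example invocation (file names are illustrative, and the script name below
# is an assumption for this snippet):
#   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set questions.txt --gold_data_path gold.tsv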
| 350 |
def pancake_sort(arr: list) -> list:
    """Sort by repeatedly flipping prefixes: bring the maximum of the unsorted
    prefix to the front, then flip that whole prefix to send it to the end."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi (brings the maximum to the front)
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix (sends the maximum to position cur - 1)
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
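# A short worked trace (sketch): pancake_sort([3, 1, 2])
#   cur=3: max 3 is at index 0 -> prefix flip is a no-op -> flip first 3 -> [2, 1, 3]
#   cur=2: max 2 is at index 0 -> prefix flip is a no-op -> flip first 2 -> [1, 2, 3]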
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 22 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def A(__a: Dict ):
lowerCAmelCase_ = r"\w+[.]\d+"
lowerCAmelCase_ = re.findall(__a , __a )
for pat in pats:
lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) )
return key
def A(__a: str , __a: Tuple , __a: List[Any] ):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A(__a: Dict , __a: Any , __a: List[Any]=42 ):
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) )
lowerCAmelCase_ = flatten_dict(__a )
lowerCAmelCase_ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = rename_key(__a )
lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(__a )
return unflatten_dict(__a )
| 351 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count raw occurrences of `term` in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    """Return (number of documents containing `term`, total number of documents);
    documents are newline-separated within `corpus`."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
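# A minimal end-to-end sketch tying the helpers together (toy corpus; the
# values in comments are what these inputs produce):
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog ran\nthe cat ran"
    df, n = document_frequency("cat", corpus)   # (2, 3)
    idf = inverse_document_frequency(df, n)     # round(log10(3 / 2), 3) = 0.176
    tf = term_frequency("cat", "the cat sat")   # 1
    print(tf_idf(tf, idf))                      # 0.176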
| 22 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''unispeech'''
def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.1 , _a=0.1 , _a=0.0_2 , _a=1E-5 , _a="group" , _a="gelu" , _a=(512, 512, 512, 512, 512, 512, 512) , _a=(5, 2, 2, 2, 2, 2, 2) , _a=(10, 3, 3, 3, 3, 2, 2) , _a=False , _a=128 , _a=16 , _a=False , _a=True , _a=0.0_5 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a=320 , _a=2 , _a=0.1 , _a=100 , _a=256 , _a=256 , _a=0.1 , _a="mean" , _a=False , _a=False , _a=256 , _a=80 , _a=0 , _a=1 , _a=2 , _a=0.5 , **_a , ) -> Dict:
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = feat_extract_norm
lowerCAmelCase_ = feat_extract_activation
lowerCAmelCase_ = list(_a )
lowerCAmelCase_ = list(_a )
lowerCAmelCase_ = list(_a )
lowerCAmelCase_ = conv_bias
lowerCAmelCase_ = num_conv_pos_embeddings
lowerCAmelCase_ = num_conv_pos_embedding_groups
lowerCAmelCase_ = len(self.conv_dim )
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = feat_proj_dropout
lowerCAmelCase_ = final_dropout
lowerCAmelCase_ = layerdrop
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_ctc_classes
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = do_stable_layer_norm
lowerCAmelCase_ = use_weighted_layer_sum
lowerCAmelCase_ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase_ = apply_spec_augment
lowerCAmelCase_ = mask_time_prob
lowerCAmelCase_ = mask_time_length
lowerCAmelCase_ = mask_time_min_masks
lowerCAmelCase_ = mask_feature_prob
lowerCAmelCase_ = mask_feature_length
lowerCAmelCase_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase_ = num_codevectors_per_group
lowerCAmelCase_ = num_codevector_groups
lowerCAmelCase_ = contrastive_logits_temperature
lowerCAmelCase_ = feat_quantizer_dropout
lowerCAmelCase_ = num_negatives
lowerCAmelCase_ = codevector_dim
lowerCAmelCase_ = proj_codevector_dim
lowerCAmelCase_ = diversity_loss_weight
# ctc loss
lowerCAmelCase_ = ctc_loss_reduction
lowerCAmelCase_ = ctc_zero_infinity
# pretraining loss
lowerCAmelCase_ = replace_prob
@property
def __a ( self ) -> str:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 352 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
lowerCamelCase__ = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def A(__a: str , __a: List[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
return (preds == labels).mean()
def A(__a: Any , __a: Any ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = simple_accuracy(__a , __a )
lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def A(__a: List[str] , __a: Optional[int] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
lowerCAmelCase_ = pearsonr(__a , __a )[0]
lowerCAmelCase_ = spearmanr(__a , __a )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def A(__a: Union[str, Any] , __a: Any , __a: str ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(__a , __a )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "mrpc":
return acc_and_fa(__a , __a )
elif task_name == "sts-b":
return pearson_and_spearman(__a , __a )
elif task_name == "qqp":
return acc_and_fa(__a , __a )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__a , __a )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__a , __a )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "rte":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__a , __a )}
elif task_name == "hans":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ):
warnings.warn(__a , __a )
requires_backends(__a , "sklearn" )
if len(__a ) != len(__a ):
raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(__a , __a )}
else:
raise KeyError(__a )
| 22 | 0 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg: str, hint=None):
    require_version(deps[pkg], hint)
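# Usage sketch: dep_version_check("tqdm") re-validates the range pinned in
# deps["tqdm"] at call time and raises if the installed version violates it.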
| 353 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ (__lowercase ):
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViTImageProcessor'''
lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , _a=None , _a=None , **_a ) -> Tuple:
lowerCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _a , )
lowerCAmelCase_ = kwargs.pop("feature_extractor" )
lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_a , _a )
def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict:
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a )
if visual_prompt is not None:
lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a )
if images is not None:
lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a )
if visual_prompt is not None and images is not None:
lowerCAmelCase_ = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCAmelCase_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCAmelCase_ = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def __a ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.batch_decode(*_a , **_a )
def __a ( self , *_a , **_a ) -> Optional[int]:
return self.tokenizer.decode(*_a , **_a )
@property
def __a ( self ) -> List[str]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , )
return self.image_processor_class
@property
def __a ( self ) -> Optional[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , )
return self.image_processor
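# A hedged usage sketch: the class name is elided above, but upstream this
# processor is CLIPSegProcessor, so typical usage looks like:
#   from transformers import CLIPSegProcessor
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")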
| 22 | 0 |
import colorsys
from PIL import Image # type: ignore
def A(__a: float , __a: float , __a: int ):
lowerCAmelCase_ = x
lowerCAmelCase_ = y
for step in range(__a ): # noqa: B007
lowerCAmelCase_ = a * a - b * b + x
lowerCAmelCase_ = 2 * a * b + y
lowerCAmelCase_ = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def A(__a: float ):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def A(__a: float ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__a , 1 , 1 ) )
def A(__a: int = 800 , __a: int = 600 , __a: float = -0.6 , __a: float = 0 , __a: float = 3.2 , __a: int = 50 , __a: bool = True , ):
lowerCAmelCase_ = Image.new("RGB" , (image_width, image_height) )
lowerCAmelCase_ = img.load()
# loop through the image-coordinates
for image_x in range(__a ):
for image_y in range(__a ):
# determine the figure-coordinates based on the image-coordinates
lowerCAmelCase_ = figure_width / image_width * image_height
lowerCAmelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCAmelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCAmelCase_ = get_distance(__a , __a , __a )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCAmelCase_ = get_color_coded_rgb(__a )
else:
lowerCAmelCase_ = get_black_and_white_rgb(__a )
return img
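# Mapping sketch for a single pixel (toy check with the defaults above):
#   figure_height = 3.2 / 800 * 600 = 2.4
#   pixel (0, 0) -> x = -0.6 + (0/800 - 0.5) * 3.2 = -2.2
#                   y =  0.0 + (0/600 - 0.5) * 2.4 = -1.2
# i.e. the top-left pixel lands at the top-left of the viewing window.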
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 354 |
import datasets
lowerCamelCase__ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
lowerCamelCase__ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
lowerCamelCase__ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
def __a ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
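# The block above is transformers' lazy-import pattern: every public name is
# declared up front, but the heavy torch/vision submodules are imported only
# when a name is first accessed. A stripped-down sketch of the idea (an
# illustrative assumption; the real _LazyModule handles many more cases):
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # import_structure maps submodule name -> list of public names
        self._name_to_submodule = {
            public_name: submodule
            for submodule, names in import_structure.items()
            for public_name in names
        }

    def __getattr__(self, name: str):
        submodule = self._name_to_submodule[name]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, name)
        setattr(self, name, value)  # cache, so later lookups skip __getattr__
        return value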
| 355 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class __magic_name__ (__lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __a ( self , _a ) -> Dict:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_a , _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __a ( self , _a ) -> str:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
lowerCAmelCase_ = 4
lowerCAmelCase_ = LegacySeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , )
lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines()
lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
lowerCAmelCase_ = {x.name for x in save_dir.iterdir()}
lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __a ( self ) -> Any:
if not FAIRSEQ_AVAILABLE:
return
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 )
lowerCAmelCase_ = 64
lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a )
lowerCAmelCase_ = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for batch in data_loader:
lowerCAmelCase_ = batch["input_ids"].shape
lowerCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCAmelCase_ = np.product(batch["input_ids"].shape )
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f"too many tokens in {len(_a )} batches" )
def __a ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 )
lowerCAmelCase_ = 2
lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
lowerCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_a , _a="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def __a ( self , _a=1000 , _a=128 ) -> str:
if os.getenv("USE_REAL_DATA" , _a ):
lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro"
lowerCAmelCase_ = max_len * 2 * 64
if not Path(_a ).joinpath("train.len" ).exists():
save_len_file(_a , _a )
else:
lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro"
lowerCAmelCase_ = max_len * 4
save_len_file(_a , _a )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a )
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset()
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCAmelCase_ = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
lowerCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
| 22 | 0 |
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
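# A quick numeric sketch (toy values):
#   simple_interest(500, 0.01, 10)    # -> 50.0 (500 * 0.01 * 10)
#   compound_interest(1000, 0.05, 2)  # -> about 102.5 (1000 * (1.05 ** 2 - 1))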
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
def find_min(arr: list) -> int:
    """Partition arr into two subsets whose sums are as close as possible and
    return the minimum difference (classic subset-sum dynamic programming)."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i items sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # reachable without item i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # The best split puts the closest achievable sum j <= s / 2 on one side
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
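# Usage sketch: find_min([1, 6, 11, 5]) returns 1 -- the closest split is
# {1, 5, 6} (sum 12) versus {11}, so the difference is 23 - 2 * 11 = 1.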
| 22 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __magic_name__ :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.0_2 , _a=3 , _a=4 , _a=None , ) -> List[Any]:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
def __a ( self ) -> int:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self ) -> Tuple:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
lowerCAmelCase_ = LlamaModel(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , attention_mask=_a )
lowerCAmelCase_ = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[Any]:
lowerCAmelCase_ = True
lowerCAmelCase_ = LlamaModel(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
lowerCAmelCase_ = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
lowerCAmelCase_ = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> str:
lowerCAmelCase_ = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
lowerCAmelCase_ = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
lowerCAmelCase_ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
lowerCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase_ = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["hidden_states"][0]
lowerCAmelCase_ = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["hidden_states"][0]
# select random slice
lowerCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
def __a ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ (__lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowerCamelCase__ = (LlamaForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = LlamaModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_a , hidden_size=37 )
def __a ( self ) -> int:
self.config_tester.run_common_tests()
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __a ( self ) -> int:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ = type
self.model_tester.create_and_check_model(*_a )
def __a ( self ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = input_dict["input_ids"]
lowerCAmelCase_ = input_ids.ne(1 ).to(_a )
lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __a ( self ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = "single_label_classification"
lowerCAmelCase_ = input_dict["input_ids"]
lowerCAmelCase_ = input_ids.ne(1 ).to(_a )
lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = "multi_label_classification"
lowerCAmelCase_ = input_dict["input_ids"]
lowerCAmelCase_ = input_ids.ne(1 ).to(_a )
lowerCAmelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase_ = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __a ( self ) -> Tuple:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __a ( self , _a ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
lowerCAmelCase_ = original_model(_a ).last_hidden_state
lowerCAmelCase_ = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = {"type": scaling_type, "factor": 10.0}
lowerCAmelCase_ = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
lowerCAmelCase_ = scaled_model(_a ).last_hidden_state
lowerCAmelCase_ = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a , _a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_a , _a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a , _a , atol=1E-5 ) )
@require_torch
class __magic_name__ (unittest.TestCase ):
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __a ( self ) -> Any:
lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
lowerCAmelCase_ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase_ = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase_ = torch.tensor([-12.8281, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -12.8281, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __a ( self ) -> Any:
lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
lowerCAmelCase_ = model(torch.tensor(_a ) )
# Expected mean on dim = -1
lowerCAmelCase_ = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase_ = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __a ( self ) -> List[str]:
lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
lowerCAmelCase_ = model(torch.tensor(_a ) )
# Expected mean on dim = -1
lowerCAmelCase_ = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase_ = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def __a ( self ) -> Dict:
lowerCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
lowerCAmelCase_ = model(torch.tensor(_a ) )
lowerCAmelCase_ = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase_ = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
lowerCAmelCase_ = "Simply put, the theory of relativity states that "
lowerCAmelCase_ = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
lowerCAmelCase_ = tokenizer.encode(_a , return_tensors="pt" )
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=_a )
# greedy generation outputs
lowerCAmelCase_ = model.generate(_a , max_new_tokens=64 , top_p=_a , temperature=1 , do_sample=_a )
lowerCAmelCase_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
| 357 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def A(__a: Any , __a: Union[str, Any] , __a: List[str] ):
lowerCAmelCase_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCAmelCase_ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase_ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(__a , exist_ok=__a )
lowerCAmelCase_ = os.path.join(__a , "README.md" )
print(F"Generating {path}" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(__a )
# make sure we are under the root of the project
lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''')
lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __magic_name__ (Pipeline ):
    def __init__( self , *args , **kwargs ) -> Optional[int]:
        super().__init__(*args , **kwargs )
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch." )
        requires_backends(self , "vision" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ) -> Optional[Any]:
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args , **kwargs )
    def preprocess( self , image ) -> Tuple:
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        inputs["target_size"] = target_size
        return inputs
    def _forward( self , model_inputs ) -> List[Any]:
        target_size = model_inputs.pop("target_size" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ) -> List[Any]:
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores , classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["bbox"].squeeze(0 )]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
        return annotation
    def _get_bounding_box( self , box ) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
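# Hedged usage sketch (illustrative, not part of the implementation above). The
# checkpoint name is an assumption; any object detection model supported by the
# pipeline should work. Each prediction is a dict shaped like the postprocess
# output: {"score": ..., "label": ..., "box": {...}}.
#
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
# preds[0]  # e.g. {"score": 0.99, "label": "cat", "box": {"xmin": 13, ...}}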
| 358 |
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def A(__a: str ):
    __a = re.sub("<n>" , "" , __a )  # remove pegasus newline char (strings are immutable, so keep the result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a ) )
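# Illustrative call (requires the punkt data downloaded above):
# A("Hello world. <n> How are you today?")
# -> "Hello world.\nHow are you today?"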
| 22 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
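# Illustrative: with the lazy module above, a torch-backed symbol such as
# `EncodecModel` is only imported on first attribute access, so the heavy
# dependencies load lazily.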
| 22 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    # Build the reversible byte -> unicode-character table used by byte-level BPE:
    # printable bytes map to themselves, the rest are shifted into unused code points.
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
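# Quick sanity check (illustrative): the table covers all 256 byte values and
# printable ASCII maps to itself.
assert len(bytes_to_unicode() ) == 256 and bytes_to_unicode()[ord("A" )] == "A"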
def get_pairs(word ):
    # Return the set of adjacent symbol pairs in a word (given as a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __magic_name__ (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> Tuple:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )
    def get_vocab( self ) -> Optional[Any]:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            # Greedily merge the lowest-ranked (earliest learned) adjacent pair.
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ) -> Tuple:
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ) -> int:
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> Any:
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words = False , **kwargs ) -> Tuple:
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
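# Standalone sketch (illustrative) of the greedy merge loop inside `bpe` above,
# independent of the tokenizer class and its vocab/merges files.
def _toy_bpe(word , ranks ):
    symbols = list(word )
    while len(symbols ) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols ) - 1 )}
        best = min(pairs , key=lambda p: ranks.get(p , float("inf" ) ) )
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols ):
            if i < len(symbols ) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1] )
                i += 2
            else:
                merged.append(symbols[i] )
                i += 1
        symbols = merged
    return symbols
# With merges ranked ("l","o") first, then ("lo","w"), "low" collapses fully:
assert _toy_bpe("low" , {("l", "o"): 0, ("lo", "w"): 1} ) == ["low"]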
| 360 |
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class __magic_name__ (PretrainedConfig ):
    model_type = '''bertabs'''
    def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ) -> List[Any]:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
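# Illustrative: any hyper-parameter above can be overridden by keyword, e.g.
# cfg = __magic_name__(enc_layers=4 , dec_layers=4 )  # then cfg.enc_layers == 4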
| 22 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ (__lowercase ):
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViTImageProcessor'''
lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Tuple:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs ) -> Dict:
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images." )
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ) -> List[str]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Optional[int]:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ) -> List[str]:
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ) -> Optional[Any]:
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
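# Hedged usage sketch (illustrative): this mirrors the CLIPSeg processor, which
# routes `text` through the tokenizer and `images`/`visual_prompt` through the
# image processor. The checkpoint name is an assumption.
#
# from transformers import CLIPSegProcessor
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# enc = processor(text=["a cat"], images=image, return_tensors="pt")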
| 361 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint , config ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
        if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[F"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                F"encoder.down.{i}.downsample.conv.weight" )
            new_checkpoint[F"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                F"encoder.down.{i}.downsample.conv.bias" )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
        ]
        if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[F"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                F"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[F"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                F"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str , dump_path: str , ):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(dump_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to store the converted diffusers VAE.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
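# Example invocation (illustrative paths; the script filename is an assumption):
# python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers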
| 22 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __magic_name__ :
    @property
    def dummy_input( self ) -> int:
        return self.get_dummy_input()
    @property
    def output_shape( self ) -> str:
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ) -> List[Any]:
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1 )
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape , generator=generator_1 , device=device ),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
    def prepare_init_args_and_inputs_for_common( self ) -> List[str]:
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32  # up blocks also need the previous output channel count (assumed key)
        if self.block_type == "mid":
            init_dict.pop("out_channels" )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output( self , expected_slice ) -> Optional[int]:
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output , Tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )
    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
    def test_training( self ) -> List[Any]:
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output , Tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
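# Note (illustrative): concrete block tests subclass this mixin and set
# `block_class` and `block_type` ("down" / "mid" / "up"); `test_output` then
# compares a fixed slice of the block output against per-class expected values.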
| 362 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
    # Validate that rows and columns are sorted in decreasing order.
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int] ) -> int:
    # Binary search for the index of the first negative number in a descending row.
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    # Rows and columns are sorted descending, so each row's boundary can only shrink.
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
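# The implementations agree on a small example (illustrative):
assert (
    count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]] )
    == count_negatives_brute_force_with_break([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]] )
    == 8
)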
def benchmark() -> None:
    from timeit import timeit
    print("Running benchmarks" )
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"{func}(grid=grid)" , setup=setup , number=500 )
        print(F"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 22 | 0 |
import argparse
import hashlib  # hashlib is only used inside the test function
import struct
class SHA1Hash:
    def __init__( self , data ) -> None:
        self.data = data
        self.h = [0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0]
    @staticmethod
    def rotate( n , b ) -> int:
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xffffffff
    def padding( self ) -> bytes:
        # Pad the message to a multiple of 64 bytes and append the bit length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
        return padded_data
    def split_blocks( self ) -> list:
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]
    def expand_block( self , block ) -> list:
        w = list(struct.unpack(">16L" , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w
    def final_hash( self ) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5a827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ed9eba1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8f1bbcdc
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xca62c1d6
                a , b , c , d , e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xffffffff,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xffffffff,
                self.h[1] + b & 0xffffffff,
                self.h[2] + c & 0xffffffff,
                self.h[3] + d & 0xffffffff,
                self.h[4] + e & 0xffffffff,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash():
    data = b"Test String"
    assert SHA1Hash(data ).final_hash() == hashlib.sha1(data ).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files" )
    parser.add_argument(
        "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , "utf-8" )
    print(SHA1Hash(hash_input ).final_hash() )
if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def A(__a: Dict ):
lowerCAmelCase_ = r"\w+[.]\d+"
lowerCAmelCase_ = re.findall(__a , __a )
for pat in pats:
lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) )
return key
def A(__a: str , __a: Tuple , __a: List[Any] ):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
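# Summary of the renames performed by the function above (illustrative):
#   *.gamma           -> *.scale or *.weight   (old PyTorch layer-norm weight)
#   *.beta            -> *.bias                (old PyTorch layer-norm bias)
#   conv weight (4-D) -> kernel, transposed (2, 3, 1, 0) to Flax HWIO layout
#   linear weight     -> kernel, transposed
#   embedding weight  -> embedding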
def A(__a: Dict , __a: Any , __a: List[Any]=42 ):
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) )
lowerCAmelCase_ = flatten_dict(__a )
lowerCAmelCase_ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = rename_key(__a )
lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(__a )
return unflatten_dict(__a )
| 22 | 0 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name ):
    config = MobileNetV1Config(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported." )
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilenet_v1_config(model_name )
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase__ = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
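# Example invocation (illustrative paths; the script filename is an assumption):
# python convert_original_tf_checkpoint_to_pytorch.py \
#     --model_name mobilenet_v1_1.0_224 \
#     --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#     --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt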
| 364 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ['''OwlViTFeatureExtractor''']
    _import_structure["image_processing_owlvit"] = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365 |
import math
def perfect_square(num: int ) -> bool:
    # Float-based check: exact only while math.sqrt is exactly representable.
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search(n: int ) -> bool:
    # Integer-only check via binary search on the square root.
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
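# Both checks agree on small inputs (illustrative):
assert perfect_square(16 ) and perfect_square_binary_search(16 )
assert not perfect_square(26 ) and not perfect_square_binary_search(26 )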
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ (__lowercase ):
@require_torch
def __a ( self ) -> Dict:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
lowerCAmelCase_ = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
lowerCAmelCase_ = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
lowerCAmelCase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
lowerCAmelCase_ = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(_a )
BertModel.from_pretrained(_a )
BertTokenizer.from_pretrained(_a )
pipeline(task="fill-mask" , model=_a )
# baseline - just load from_pretrained with normal network
lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
lowerCAmelCase_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCAmelCase_ = "1"
lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def __a ( self ) -> List[str]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
lowerCAmelCase_ = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
lowerCAmelCase_ = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
lowerCAmelCase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
lowerCAmelCase_ = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(_a )
BertModel.from_pretrained(_a )
BertTokenizer.from_pretrained(_a )
pipeline(task="fill-mask" , model=_a )
# baseline - just load from_pretrained with normal network
lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
lowerCAmelCase_ = self.get_env()
lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def __a ( self ) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
lowerCAmelCase_ = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
lowerCAmelCase_ = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
lowerCAmelCase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
lowerCAmelCase_ = self.get_env()
lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCAmelCase_ = "1"
lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "\nfrom transformers import pipeline\n "
lowerCAmelCase_ = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
lowerCAmelCase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
lowerCAmelCase_ = self.get_env()
lowerCAmelCase_ = "1"
lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, mock, run] )]
lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "\nfrom transformers import AutoModel\n "
lowerCAmelCase_ = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
lowerCAmelCase_ = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
lowerCAmelCase_ = self.get_env()
lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCAmelCase_ = "1"
lowerCAmelCase_ = subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 366 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg , hint=None ):
    require_version(deps[pkg] , hint )
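# Illustrative call: dep_version_check("numpy") raises if the installed numpy
# does not satisfy the pin recorded in dependency_versions_table.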
| 22 | 0 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = DownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __a ( self ) -> Dict:
lowerCAmelCase_ = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = AttnDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __a ( self ) -> int:
lowerCAmelCase_ , lowerCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ = 32
return init_dict, inputs_dict
def __a ( self ) -> Tuple:
lowerCAmelCase_ = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __a ( self ) -> int:
lowerCAmelCase_ , lowerCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = SkipDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __a ( self ) -> Optional[int]:
return super().get_dummy_input(include_skip_sample=_a )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_skip_sample=_a )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = DownEncoderBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __a ( self ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=_a )
def __a ( self ) -> Dict:
lowerCAmelCase_ = {
"in_channels": 32,
"out_channels": 32,
}
lowerCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Dict:
lowerCAmelCase_ = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __a ( self ) -> List[Any]:
return super().get_dummy_input(include_temb=_a )
def __a ( self ) -> Dict:
lowerCAmelCase_ = {
"in_channels": 32,
"out_channels": 32,
}
lowerCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = UNetMidBlockaD # noqa F405
lowerCamelCase__ = '''mid'''
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = {
"in_channels": 32,
"temb_channels": 128,
}
lowerCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> List[str]:
lowerCAmelCase_ = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase__ = '''mid'''
def __a ( self ) -> int:
lowerCAmelCase_ , lowerCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ = 32
return init_dict, inputs_dict
def __a ( self ) -> Tuple:
lowerCAmelCase_ = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase__ = '''mid'''
@property
def __a ( self ) -> Dict:
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __a ( self ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ = 32
return init_dict, inputs_dict
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = UpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __a ( self ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __a ( self ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __a ( self ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __a ( self ) -> Tuple:
lowerCAmelCase_ , lowerCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ = 32
return init_dict, inputs_dict
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __a ( self ) -> Union[str, Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase_ = 32
return init_dict, inputs_dict
def __a ( self ) -> Dict:
lowerCAmelCase_ = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = AttnUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __a ( self ) -> Any:
lowerCAmelCase_ = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = SkipUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __a ( self ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __a ( self ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __a ( self ) -> Any:
lowerCAmelCase_ = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = UpDecoderBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __a ( self ) -> int:
return super().get_dummy_input(include_temb=_a )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = {"in_channels": 32, "out_channels": 32}
lowerCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> int:
lowerCAmelCase_ = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7]
super().test_output(_a )
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __a ( self ) -> Tuple:
return super().get_dummy_input(include_temb=_a )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = {"in_channels": 32, "out_channels": 32}
lowerCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8]
super().test_output(_a )
| 367 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
    raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
    ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
    ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
    ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
    ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location="cpu" )
    hub_interface = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval()
    hub_interface.model.load_state_dict(sd["model"] )
    return hub_interface
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load("pytorch/fairseq" , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace("." , "-" )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors="pt" ).unsqueeze(0 )
    if not torch.eq(tokens , tokensa ).all():
        raise ValueError(
            F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict("mnli" , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , "lm_head" ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
    )
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
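# usage sketch (added; the script filename below is an assumption — pass any
# FAIRSEQ_MODELS entry or a path to a local fairseq model.pt):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn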
| 22 | 0 |
import math
def perfect_square(num: int ):
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search(n: int ):
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
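# quick self-check (added): 16 = 4**2 is a perfect square, 26 is not.
assert perfect_square(16 ) and perfect_square_binary_search(16 )
assert not perfect_square_binary_search(26 )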
| 368 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = MobileBertTokenizer
lowerCamelCase__ = MobileBertTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = filter_non_english
lowerCamelCase__ = '''google/mobilebert-uncased'''
def __a ( self ) -> Optional[Any]:
super().setUp()
lowerCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowerCAmelCase_ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __a ( self , _a ) -> Any:
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = "unwanted, running"
return input_text, output_text
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __a ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ = {}
for i, token in enumerate(_a ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Dict:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False
lowerCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = ["的", "人", "有"]
lowerCAmelCase_ = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 22 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''timesformer'''
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1E-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
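# usage sketch (added): `__magic_name__` is this file's alias for the
# Timesformer config class; defaults mirror the facebook/timesformer checkpoints.
if __name__ == "__main__":
    config = __magic_name__(num_frames=16 )
    print(config.hidden_size )  # 768 by default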
| 369 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(n: int = 200_0000 ):
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
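# sanity check (added): primes below 10 are 2, 3, 5, 7, which sum to 17.
assert solution(10 ) == 17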
| 22 | 0 |
def pancake_sort(arr: list ):
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(pancake_sort(unsorted))
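# quick self-check (added): the prefix reversals sort via flips instead of swaps.
assert pancake_sort([3, 1, 2] ) == [1, 2, 3]
assert pancake_sort([] ) == []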
| 370 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''mobilenet_v2'''
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.0_2 , layer_norm_eps=0.0_0_1 , semantic_loss_ignore_index=255 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __magic_name__ (__lowercase ):
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def __a ( self ) -> float:
return 1E-4
| 22 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __magic_name__ (__lowercase ):
def __a ( self ) -> List[str]:
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self ) -> int:
with self.assertRaises(_a ):
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __a ( self ) -> Optional[int]:
with self.assertRaises(_a ):
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self ) -> Optional[int]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def __a ( self ) -> Dict:
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self ) -> int:
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __a ( self ) -> Optional[Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __a ( self ) -> int:
import PIL.Image
lowerCAmelCase_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=_a ) as mock_cast_to_python_objects:
lowerCAmelCase_ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) )
lowerCAmelCase_ , lowerCAmelCase_ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , _a )
self.assertFalse(kwargs["optimize_list_casting"] )
def A(__a: Optional[Any] , __a: int ):
lowerCAmelCase_ = pa.BufferReader(__a ) if isinstance(__a , pa.Buffer ) else pa.memory_map(__a )
lowerCAmelCase_ = pa.ipc.open_stream(__a )
lowerCAmelCase_ = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: int , __a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def A():
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=__a , features=__a ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
lowerCAmelCase_ = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ = pa.ipc.open_stream(__a )
lowerCAmelCase_ = f.read_all()
lowerCAmelCase_ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__a )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def A(__a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def A(__a: Any ):
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def A(__a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: List[str] , __a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: Union[str, Any] , __a: List[Any] ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: Dict , __a: Dict ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def A():
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
lowerCAmelCase_ = os.path.join(__a , "test.arrow" )
with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(__a , 1 )
def A(__a: str ):
if pa.types.is_list(__a ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def A(__a: Tuple , __a: Any ):
if isinstance(lst[0] , __a ):
change_first_primitive_element_in_list(lst[0] , __a )
else:
lowerCAmelCase_ = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def A(__a: Any , __a: str , __a: int ):
lowerCAmelCase_ = pa.array(TypedSequence(__a , optimized_int_type=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def A(__a: Tuple , __a: str , __a: Optional[int] ):
# in range
lowerCAmelCase_ = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
lowerCAmelCase_ = copy.deepcopy(__a )
lowerCAmelCase_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__a , __a )
lowerCAmelCase_ = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def A(__a: Dict , __a: List[str] ):
lowerCAmelCase_ = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=__a ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def A(__a: Union[str, Any] ):
lowerCAmelCase_ = "mock://dataset-train.arrow"
with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__a ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__a )
def A():
lowerCAmelCase_ = pa.BufferOutputStream()
with ParquetWriter(stream=__a ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
lowerCAmelCase_ = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ = pq.read_table(__a )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def A(__a: Tuple , __a: Dict ):
import PIL.Image
lowerCAmelCase_ = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="png" )
lowerCAmelCase_ = pa.BufferOutputStream()
with ParquetWriter(
stream=__a , features=Features({"image": Image()} ) , embed_local_files=__a ) as writer:
writer.write({"image": image_path} )
writer.finalize()
lowerCAmelCase_ = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ = pq.read_table(__a )
lowerCAmelCase_ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , __a )
with open(__a , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def A():
lowerCAmelCase_ = pa.schema([pa.field("col_1" , pa.string() , nullable=__a )] )
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(stream=__a ) as writer:
writer._build_writer(inferred_schema=__a )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 371 |
from __future__ import annotations
def depth_first_search(graph: dict , start: str ):
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
    '''A''': ['''B''', '''C''', '''D'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F'''],
    '''D''': ['''B''', '''D'''],
    '''E''': ['''B''', '''F'''],
    '''F''': ['''C''', '''E''', '''G'''],
    '''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
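# sanity check (added): every vertex of G is reachable from "A".
assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}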
| 22 | 0 |
from collections.abc import Callable
def bisection(function: Callable[[float], float] , a: float , b: float ):
    start = a
    end = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval." )
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float ):
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
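# usage sketch (added): the same routine brackets the root of x**2 - 4 on [0, 3].
assert abs(bisection(lambda x: x**2 - 4 , 0 , 3 ) - 2.0 ) < 10**-6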
| 350 |
def pancake_sort(arr: list ):
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(pancake_sort(unsorted))
| 22 | 0 |
def A(number: int ):
    if not isinstance(number , int ):
        lowerCAmelCase_ = F"Input value of [number={number}] must be an integer"
        raise TypeError(lowerCAmelCase_ )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
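# quick self-check (added): automorphic numbers end their own squares,
# e.g. 5**2 = 25 and 76**2 = 5776, while 7**2 = 49 fails.
assert A(5 ) and A(76 )
assert not A(7 )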
| 351 |
import string
from math import log10
def term_frequency(term: str , document: str ):
    document_without_punctuation = document.translate(
        str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
    tokenize_document = document_without_punctuation.split(" " )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency(term: str , corpus: str ):
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" , "" , string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency(df: int , n: int , smoothing=False ):
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) , 3 )
def tf_idf(tf: int , idf: int ):
    return round(tf * idf , 3 )
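# worked example (added): "to" occurs twice in the document and in 1 of the
# 2 corpus documents, so idf = round(log10(2 / 1), 3) = 0.301 and tf-idf = 0.602.
if __name__ == "__main__":
    tf = term_frequency("to" , "To be, or not to be" )  # 2
    df , n = document_frequency("to" , "To be, or not to be\nAll is well" )  # (1, 2)
    print(tf_idf(tf , inverse_document_frequency(df , n ) ) )  # 0.602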
| 22 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase__ = 2
class __magic_name__ :
def __init__( self , *, # begin keyword-only arguments
_a="<s>" , _a="<pad>" , _a="</s>" , _a="<unk>" , _a=None , ) -> int:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = bos, unk, pad, eos
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = {}
lowerCAmelCase_ = self.add_symbol(_a )
lowerCAmelCase_ = self.add_symbol(_a )
lowerCAmelCase_ = self.add_symbol(_a )
lowerCAmelCase_ = self.add_symbol(_a )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_a )
lowerCAmelCase_ = len(self.symbols )
def __eq__( self , _a ) -> Dict:
return self.indices == other.indices
def __getitem__( self , _a ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ) -> List[str]:
return len(self.symbols )
def __contains__( self , _a ) -> str:
return sym in self.indices
@classmethod
def __a ( cls , _a ) -> List[str]:
lowerCAmelCase_ = cls()
d.add_from_file(_a )
return d
def __a ( self , _a , _a=1 , _a=False ) -> List[Any]:
if word in self.indices and not overwrite:
lowerCAmelCase_ = self.indices[word]
lowerCAmelCase_ = self.count[idx] + n
return idx
else:
lowerCAmelCase_ = len(self.symbols )
lowerCAmelCase_ = idx
self.symbols.append(_a )
self.count.append(_a )
return idx
def __a ( self , _a ) -> str:
return 0
def __a ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
try:
with open(_a , "r" , encoding="utf-8" ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(_a ) )
return
lowerCAmelCase_ = f.readlines()
lowerCAmelCase_ = self._load_meta(_a )
for line in lines[indices_start_line:]:
try:
lowerCAmelCase_ , lowerCAmelCase_ = line.rstrip().rsplit(" " , 1 )
if field == "#fairseq:overwrite":
lowerCAmelCase_ = True
lowerCAmelCase_ , lowerCAmelCase_ = line.rsplit(" " , 1 )
else:
lowerCAmelCase_ = False
lowerCAmelCase_ = int(_a )
lowerCAmelCase_ = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(_a ) )
self.add_symbol(_a , n=_a , overwrite=_a )
except ValueError:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def A(__a: List[Any] ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCAmelCase_ = dict((re.sub(r"@@$" , "" , __a ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , __a ), v) for k, v in d.items() )
lowerCAmelCase_ = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F"{k}</w>"]
lowerCAmelCase_ = d[k] # restore
return da
def A(__a: Optional[Any] , __a: Dict ):
# prep
if not os.path.exists(__a ):
raise ValueError(F"path {biogpt_checkpoint_path} does not exist!" )
os.makedirs(__a , exist_ok=__a )
print(F"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
lowerCAmelCase_ = os.path.join(__a , "checkpoint.pt" )
if not os.path.isfile(__a ):
raise ValueError(F"path to the file {checkpoint_file} does not exist!" )
lowerCAmelCase_ = torch.load(__a , map_location="cpu" )
lowerCAmelCase_ = chkpt["cfg"]["model"]
# dicts
lowerCAmelCase_ = os.path.join(__a , "dict.txt" )
if not os.path.isfile(__a ):
raise ValueError(F"path to the file {dict_file} does not exist!" )
lowerCAmelCase_ = Dictionary.load(__a )
lowerCAmelCase_ = rewrite_dict_keys(src_dict.indices )
lowerCAmelCase_ = len(__a )
lowerCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
print(F"Generating {src_vocab_file} of {src_vocab_size} records" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__a , ensure_ascii=__a , indent=__a ) )
# merges_file (bpecodes)
lowerCAmelCase_ = os.path.join(__a , "bpecodes" )
if not os.path.isfile(__a ):
raise ValueError(F"path to the file {bpecodes_file} does not exist!" )
lowerCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["merges_file"] )
shutil.copyfile(__a , __a )
# model config
lowerCAmelCase_ = os.path.join(__a , "config.json" )
lowerCAmelCase_ = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(F"Generating {biogpt_model_config_file}" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__a , ensure_ascii=__a , indent=__a ) )
# tokenizer config
lowerCAmelCase_ = os.path.join(__a , __a )
lowerCAmelCase_ = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(F"Generating {biogpt_tokenizer_config_file}" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__a , ensure_ascii=__a , indent=__a ) )
# model
lowerCAmelCase_ = chkpt["model"]
# remove unneeded keys
lowerCAmelCase_ = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(__a , __a )
lowerCAmelCase_ = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight" ):
lowerCAmelCase_ = model_state_dict.pop(__a )
else:
lowerCAmelCase_ = model_state_dict.pop(__a )
lowerCAmelCase_ = BioGptConfig.from_pretrained(__a )
lowerCAmelCase_ = BioGptForCausalLM(__a )
# check that it loads ok
model_new.load_state_dict(__a )
# save
lowerCAmelCase_ = os.path.join(__a , __a )
print(F"Generating {pytorch_weights_dump_path}" )
torch.save(__a , __a )
print("Conversion is done!" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 352 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
lowerCamelCase__ = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds , labels ):
    warnings.warn(lowerCamelCase__ , FutureWarning )
    requires_backends(simple_accuracy , "sklearn" )
    return (preds == labels).mean()
def acc_and_f1(preds , labels ):
    warnings.warn(lowerCamelCase__ , FutureWarning )
    requires_backends(acc_and_f1 , "sklearn" )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds , labels ):
    warnings.warn(lowerCamelCase__ , FutureWarning )
    requires_backends(pearson_and_spearman , "sklearn" )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name , preds , labels ):
    warnings.warn(lowerCamelCase__ , FutureWarning )
    requires_backends(glue_compute_metrics , "sklearn" )
    assert len(preds ) == len(labels ), F"Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics(task_name , preds , labels ):
    warnings.warn(lowerCamelCase__ , FutureWarning )
    requires_backends(xnli_compute_metrics , "sklearn" )
    if len(preds ) != len(labels ):
        raise ValueError(F"Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}" )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
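# usage sketch (added; assumes scikit-learn is installed): 2 of 3 toy
# predictions match, so sst-2 accuracy is 2/3.
if __name__ == "__main__":
    import numpy as np
    print(glue_compute_metrics("sst-2" , np.array([0, 1, 1] ) , np.array([0, 1, 0] ) ) )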
| 22 | 0 |