from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Polynomial with coefficients ordered from lowest to highest degree."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        # Copy the longer coefficient list and add the shorter one into it.
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        # Multiplication is a convolution of the two coefficient lists.
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at x = substitution."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the first derivative of the polynomial."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        """Return the antiderivative, with `constant` as the constant of integration."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
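

# A minimal usage sketch for the class above (the values are illustrative):
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # represents 3x^2 + 2x + 1
    q = Polynomial(1, [0, 1])  # represents x
    print(p + q)  # 3x^2 + 3x + 1
    print(p.derivative())  # 6x + 2
    print(p.evaluate(2))  # 3*4 + 2*2 + 1 = 17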
"""Dijkstra's algorithm on a binary grid: cells containing 1 are walkable,
cells containing 0 are blocked, and every step has cost 1."""
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Return (shortest distance, path) from source to destination, or
    (np.inf, []) if the destination is unreachable."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            # Walk the predecessor chain backwards to recover the path.
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
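
    # A minimal usage sketch (the grid values are illustrative): 1 marks a
    # walkable cell, 0 a wall, so the path has to go around the middle row.
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    print(dijkstra(demo_grid, (0, 0), (2, 0), allow_diagonal=False))
    # prints: (6.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)])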
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on the mean difference to random labels
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model, now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone the first model's weights
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameter gradients of the two models
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained(
            "fusing/autoencoder-kl-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
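

# Usage note (this assumes the usual Hugging Face test conventions): the @slow
# integration tests above are skipped by default and only run when the RUN_SLOW
# environment variable is set, e.g.
#     RUN_SLOW=1 pytest tests/models/test_models_vae.py
# (the test file path is an assumption based on the diffusers test layout).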
"""Convert mLUKE checkpoint."""

import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load the configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
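
# Example invocation of this conversion script (all file and folder names below
# are illustrative placeholders, not paths shipped with the original checkpoint):
#     python convert_mluke_checkpoint.py \
#         --checkpoint_path mluke/pytorch_model.bin \
#         --metadata_path mluke/metadata.json \
#         --entity_vocab_path mluke/entity_vocab.jsonl \
#         --pytorch_dump_folder_path ./mluke-base-converted \
#         --model_size base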
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
# Initialise PyTorch model
UpperCAmelCase_ = XLNetConfig.from_json_file(lowerCAmelCase__ )
UpperCAmelCase_ = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
UpperCAmelCase_ = finetuning_task
UpperCAmelCase_ = GLUE_TASKS_NUM_LABELS[finetuning_task]
UpperCAmelCase_ = XLNetForSequenceClassification(lowerCAmelCase__ )
elif "squad" in finetuning_task:
UpperCAmelCase_ = finetuning_task
UpperCAmelCase_ = XLNetForQuestionAnswering(lowerCAmelCase__ )
else:
UpperCAmelCase_ = XLNetLMHeadModel(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(f"""Save PyTorch model to {os.path.abspath(lowerCAmelCase__ )}""" )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(f"""Save configuration file to {os.path.abspath(lowerCAmelCase__ )}""" )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
lowerCamelCase = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
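
# Example invocation of this conversion script (the checkpoint paths are
# illustrative; the directory layout follows Google's published XLNet archives):
#     python convert_xlnet_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#         --xlnet_config_file xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#         --pytorch_dump_folder_path ./xlnet-base-converted \
#         --finetuning_task sst-2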
import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
"""simple docstring"""
def snake_case_ ( A_ : int = 1_00_00_00 ):
'''simple docstring'''
_lowerCamelCase : str = set(range(3, A_, 2 ) )
primes.add(2 )
for p in range(3, A_, 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p, A_, A_ ) ) )
_lowerCamelCase : Tuple = [float(A_ ) for n in range(limit + 1 )]
for p in primes:
for n in range(A_, limit + 1, A_ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
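
# A small worked example of what solution() computes: for limit = 8 the sum is
# phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, i.e. there are 21
# reduced proper fractions n/d with d <= 8.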
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
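
# A few illustrative checks against the pattern above: a valid number is a
# 0 / 94 / +94 / 0094 prefix, "7" plus a digit in {0,1,2,4,5,6,7,8}, an
# optional "-" or " " separator, then exactly seven digits.
#     is_sri_lankan_phone_number("0712345678")    # True
#     is_sri_lankan_phone_number("+94773283048")  # True
#     is_sri_lankan_phone_number("0731234567")    # False: "73" is not a valid prefix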
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
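

# A minimal usage sketch for the composite config above (the sub-config
# overrides are illustrative):
#     config = Pix2StructConfig()  # default text + vision sub-configs
#     config = Pix2StructConfig.from_text_vision_configs(
#         Pix2StructTextConfig(num_layers=6), Pix2StructVisionConfig(num_hidden_layers=6)
#     )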
"""Sort the entries in the auto mappings of src/transformers/models/auto alphabetically."""

import argparse
import os
import re

PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
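
# Typical invocations (the utils/ path is an assumption based on where the
# transformers repo keeps such style scripts):
#     python utils/sort_auto_mappings.py               # fix files in place
#     python utils/sort_auto_mappings.py --check_only  # only report unsorted files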
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of `length` unit cells with unit squares
    and tiles of length two, three or four. The recurrence places the first
    tile at every possible offset and reuses the count for the remainder."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
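
# Small cases as a sanity check, counted by hand: a row of length 2 is either
# two unit squares or one 2-tile, so solution(2) == 2; a row of length 3 is
# one of {1+1+1, 2+1, 1+2, 3}, so solution(3) == 4.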
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably
    with the other models in the library, keeping the same API.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init-weights function to keep the class compatible with the library.
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
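

# A minimal usage sketch (an assumption for illustration: "resnet18" stands in
# for any name returned by timm.list_models(), and the config import path may
# vary by transformers version):
#     from transformers import TimmBackboneConfig
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps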
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close the issue after 7 further days of inactivity since the bot's stale comment.
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Mark the issue as stale after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
"""Pure Python implementations of a fixed-priority queue and an
element-priority queue, both with a maximum capacity of 100 items."""


class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with fixed priorities 0 (highest), 1 and 2; FIFO within each priority."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # Return the first element of the highest-priority non-empty queue.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue in which the smallest element has the highest priority."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self) -> None:
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self) -> None:
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
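# Note (added): the expectations above follow from the fixture in
# `get_dataset()`: "test_repo1" and "test_repo2" share near-identical content
# ("a " * 20 vs "a " * 30), so MinHash at a 0.85 Jaccard threshold groups them
# into one duplicate cluster of two files, and deduplication keeps two rows.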
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Factory that instantiates one of the library's feature extractor classes from a pretrained checkpoint."""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
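# Minimal usage sketch (added for illustration; the checkpoint name below is
# only an example):
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#
# Custom pairs can be made discoverable at runtime with
# `AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)`.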
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
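# Usage note (added): instantiating DonutFeatureExtractor still works but now
# emits a FutureWarning; new code should build the image processor directly,
# e.g. `DonutImageProcessor.from_pretrained(checkpoint)` for a Donut checkpoint
# of your choice.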
'''simple docstring'''
import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
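# Quick sanity check (added for illustration): for n = 10,
# (1 + ... + 10)^2 = 55^2 = 3025 and 1^2 + ... + 10^2 = 385,
# so solution(10) == 3025 - 385 == 2640.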
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
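# Note on the key-renaming trick above (comment added for clarity): `zip` pairs
# the ordered keys of the source checkpoint with the ordered keys of the fresh
# diffusers model, so the mapping is only correct because both state dicts
# enumerate their parameters in the same order.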
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
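# Design note (added): `full_loop` mirrors how PNDM is used in practice. The
# scheduler first runs Runge-Kutta warm-up steps (`prk_timesteps`) to build up
# a history of model outputs, then switches to the cheaper linear multistep
# updates (`plms_timesteps`) that reuse that history.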
"""simple docstring"""
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort ``array[start:end]`` in place with insertion sort and return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """
    :param array: list of elements to sort
    :return: the same list, sorted in ascending order

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    >>> sort([])
    []
    >>> sort([5])
    [5]
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
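# Design note (added): introsort runs quicksort-style partitioning until the
# recursion depth budget (2 * log2(n)) is exhausted, then falls back to
# heapsort, and hands slices of at most `size_threshold` (16) elements to
# insertion sort. This keeps the worst case at O(n log n).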
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
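# Design note (added): `replicate`/`shard` follow the usual pmap data layout:
# parameters are copied to every device while inputs gain a leading device
# axis, which is why `images.shape[0]` equals `jax.device_count()` above.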
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """
    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
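# Design note (added): this recursive formulation re-solves overlapping
# subproblems without memoisation, so it takes exponential time in the worst
# case; the O(n log n) patience-sorting approach is preferred for large inputs.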
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below ``n`` that are multiples of
    3 or 5 (Project Euler problem 1)."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
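# Quick sanity check (added for illustration): below 10 the multiples of 3 or 5
# are 3, 5, 6 and 9, so solution(10) == 23.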
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=13 , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : int=True , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Dict=37 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : int=512 , lowerCAmelCase_ : int=16 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Dict=0.0_2 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : List[str]=None , ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : Optional[int] = use_input_mask
UpperCAmelCase_ : Union[str, Any] = use_token_type_ids
UpperCAmelCase_ : List[str] = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Any = num_choices
UpperCAmelCase_ : List[str] = scope
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Any = None
if self.use_input_mask:
UpperCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = BioGptModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
UpperCAmelCase_ : Any = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
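# The tester above only builds random configs/inputs and runs shape checks; the
# unittest classes below wire it into the shared ModelTesterMixin / pipeline
# test machinery, so each test method is a thin dispatcher.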
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1_024, num_beams=5, early_stopping=True, )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 95 |
import math
def prime_sieve(n: int) -> list:
    """Return all primes below n using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """For each pair of consecutive primes (p, q), sum the numbers between p**2
    and q**2 (and not above limit) divisible by p or q but not by both."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
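# Sanity checks: prime_sieve(10) == [2, 3, 5, 7]; the main scan only ever needs
# primes up to sqrt(limit), which is why the sieve bound above stays small.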
if __name__ == "__main__":
print(solution())
| 685 | 0 |
"""simple docstring"""
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
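# Preferred usage after the reorganization (a sketch; the decorator retries the
# wrapped function with a halved batch size whenever it hits a CUDA OOM error):
#
#   from accelerate import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...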
| 96 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
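# Example invocation (script file name and all paths are placeholders):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./downstream_checkpoint.ckpt \
#       --model_dump_path ./converted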
| 685 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()

masked_input = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
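# Each returned triple is (filled_sentence, probability, predicted_token); with
# camembert-base the top completion for this prompt is typically "délicieux",
# though exact scores depend on the checkpoint version.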
| 97 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
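# e.g. with pad_token_id=0, [[5, 6, 0], [7, 0, 0]] trims to [[5, 6], [7, 0]]:
# only columns made up entirely of padding are dropped.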
class Seq2SeqDataset(Dataset):
    """Reads a line-per-example source/target file pair and tokenizes lazily."""

    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
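# Worked example: f1_score("new york city", "york city") shares 2 tokens, so
# precision = 2/3, recall = 2/2 and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.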
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
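# Typical call site in the RAG finetuning script (a sketch):
#   hparams, config = set_extra_model_params(
#       ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout"), hparams, config)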
| 98 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
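# Each vocab line is either a single token or a comma-separated group of surface
# variants sharing one id: `vocab` maps every variant to the id, `raw_vocab`
# keeps the joined group as its key, and `ids_to_tokens` inverts the mapping.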
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs, ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
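    # e.g. clean_text("mail to: foo@example.com") -> "mail to: <EMAIL>"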
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
if len(a) == 1 and len(a) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
if len(a) == 1 and len(a) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
| 685 | 0 |
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
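# Quick check: the even Fibonacci terms not exceeding 10 are 2 and 8, so solution(10) == 10.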
if __name__ == "__main__":
print(f'''{solution() = }''')
| 99 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["""OwlViTFeatureExtractor"""]
    _import_structure["image_processing_owlvit"] = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
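# After this swap, submodules are imported lazily: heavy dependencies such as
# torch or vision are only loaded when the matching attribute is first accessed,
# e.g. `from transformers.models.owlvit import OwlViTProcessor`.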
| 100 |
import math
def is_prime(number: int) -> bool:
    """Checks primality in O(sqrt(n)) time via 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
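# Quick check: solution(6) == 13, the sixth prime (2, 3, 5, 7, 11, 13).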
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
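# Minimal end-to-end sketch (assumption: this is the packaged `pandas` loader of the
# `datasets` library, so `_generate_tables` above consumes pickled DataFrames):
#   pd.DataFrame({"text": ["a", "b"]}).to_pickle("train.pkl")
#   ds = datasets.load_dataset("pandas", data_files={"train": "train.pkl"})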
| 101 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
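# Usage sketch (illustrative only):
#   config = VisualBertConfig(visual_embedding_dim=1024)
#   assert config.visual_embedding_dim == 1024 and config.model_type == "visual_bert"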
| 685 | 0 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


# We will verify the conversion on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
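# Example invocation (assumption: the script and file names below are placeholders for
# the local conversion script, a downloaded checkpoint, and its config):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 --no-push_to_hub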
| 102 |
from __future__ import annotations

from heapq import heappop, heappush

import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Returns the shortest distance from source to destination and the path to it."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
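# Illustrative call (cells equal to 1 are walkable, per the relaxation step above):
#   grid = np.array([[1, 1], [1, 1]])
#   dijkstra(grid, (0, 0), (1, 1), allow_diagonal=False)
#   # -> (2.0, [(0, 0), (0, 1), (1, 1)])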
| 685 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2 - 104 are only used for pretraining
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
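# Sketch of the id layout built in `__init__` above (assumption: default offset=103):
#   id 0 -> <pad>, id 1 -> </s>, id 2 -> <mask_1>, id 3 -> <mask_2>,
#   ids 4..104 -> <unk_2>..<unk_102>, and every raw SentencePiece id is shifted up
#   by `offset` before being exposed through the public vocab.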
| 103 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id

    return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
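# Example invocation (assumption: the script name and paths are placeholders for the
# original studio-ousia checkpoint files):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke_base/pytorch_model.bin \
#       --metadata_path mluke_base/metadata.json \
#       --entity_vocab_path mluke_base/entity_vocab.jsonl \
#       --pytorch_dump_folder_path mluke-base --model_size base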
| 685 | 0 |
"""simple docstring"""
def sum_of_proper_divisors(input_num: int) -> int:
    """Returns the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
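# Worked example (for illustration): 28 is a perfect number, so
# sum_of_proper_divisors(28) == 1 + 2 + 4 + 7 + 14 == 28.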
| 104 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 685 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # dicts, lists of dicts, generators and datasets are forwarded as-is
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
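# Usage sketch (assumption: a ViLT VQA checkpoint; "visual-question-answering" is the
# pipeline task this class backs):
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="http://images.cocodataset.org/val2017/000000039769.jpg",
#       question="How many cats are there?")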
| 105 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 685 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )

    return bool(re.search(pattern, phone))
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
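# Sanity checks consistent with the pattern above (for illustration):
#   is_sri_lankan_phone_number("0771234567")    # True
#   is_sri_lankan_phone_number("+94771234567")  # True
#   is_sri_lankan_phone_number("0731234567")    # False: 3 is not a valid second digit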
| 685 | 0 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
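# Illustrative calls: next_prime(14) walks up to 17, next_prime(14, desc=True) walks
# down to 13, and a value that is already prime recurses on value + 1.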
| 107 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
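# Typical usage (assumption: run from the repository root):
#   python utils/sort_auto_mappings.py --check_only   # only report unsorted mappings
#   python utils/sort_auto_mappings.py                # rewrite the files in place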
| 685 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path) | 108 |
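# Example invocation (assumption: the .pth path is a placeholder for an official XLM
# dump):
#   python convert_xlm_checkpoint.py --xlm_checkpoint_path mlm_en_2048.pth \
#       --pytorch_dump_folder_path xlm-mlm-en-2048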
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably
    with the other models in the library keeping the same API.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def _from_config(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """
        Empty init weights function to ensure compatibility of the class in the library.
        """
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and sets the maximum capacity to ``n``."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Accesses ``x``, evicting the least recently used key if the store is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Prints all the elements in the store."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 109 |
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with fixed priorities 0, 1 and 2; a lower number means a higher priority."""

    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """Queue where the element's value is its priority; the smallest value dequeues first."""

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 0 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class lowerCamelCase_( lowercase_ ):
'''simple docstring'''
def __init__( self ):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin ( self , args , state , control , **kwargs ):
        self.first_column = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['''Training Loss''']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''' )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end ( self , args , state , control , **kwargs ):
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step ( self , args , state , control , eval_dataloader=None , **kwargs ):
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict ( self , args , state , control , **kwargs ):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log ( self , args , state , control , logs=None , **kwargs ):
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily "Step" since we're not in the epoch evaluation strategy
            values['''Step'''] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate ( self , args , state , control , metrics=None , **kwargs ):
        if self.training_tracker is not None:
            values = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values['''Training Loss'''] = log['''loss''']
                    break
            if self.first_column == "Epoch":
                values['''Epoch'''] = int(state.epoch )
            else:
                values['''Step'''] = state.global_step
            metric_key_prefix = '''eval'''
            for k in metrics:
                if k.endswith('''_loss''' ):
                    metric_key_prefix = re.sub(R'''\_loss$''' , '''''' , k )
            _ = metrics.pop('''total_flos''' , None )
            _ = metrics.pop('''epoch''' , None )
            _ = metrics.pop(F"""{metric_key_prefix}_runtime""" , None )
            _ = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , None )
            _ = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , None )
            _ = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , None )
            for k, v in metrics.items():
                if k == F"""{metric_key_prefix}_loss""":
                    values['''Validation Loss'''] = v
                else:
                    splits = k.split('''_''' )
                    name = ''' '''.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end ( self , args , state , control , **kwargs ):
        self.training_tracker.update(
            state.global_step , comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=True )
        self.training_tracker = None
| 661 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
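# Fixture note: the first two "files" below are near-duplicates by construction ("a " repeated), the third is distinct.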
def get_dataset() -> Dataset:
'''simple docstring'''
a_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
    dataset = Dataset.from_dict(a_ )
return dataset
class SCREAMING_SNAKE_CASE__ ( TestCase ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85)
        self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter) , 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True)
| 685 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
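# Shared helpers for the tests below: a tiny model/optimizer/scheduler/dataloader bundle, a scalar weight "signature", and a weight randomiser.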
def create_components():
    model = torch.nn.Linear(2, 4 )
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0 )
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1 )
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    # Overwrite the model weights with a freshly initialised layer of the same shape.
    random_state = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
    model.load_state_dict(random_state )
class snake_case_ ( AccelerateTestCase ):
@require_cuda
def snake_case_ ( self ):
a_ : int = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError ):
            accelerator = Accelerator(cpu=True )
def snake_case_ ( self ):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
GradientState._reset_state()
def snake_case_ ( self ):
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def snake_case_ ( self ):
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def snake_case_ ( self ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch("torch.cuda.set_device" , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
a_ : int = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def snake_case_ ( self ):
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1e-3 )
            # make sure loaded weights match
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1e-3 )
def snake_case_ ( self ):
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        # saving hook
        def save_config(models , weights , output_dir ):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir , "data.json" ) , "w" ) as f:
                json.dump(config , f )
        # loading hook
        def load_config(models , input_dir ):
            with open(os.path.join(input_dir , "data.json" ) , "r" ) as f:
                config = json.load(f )
            models[0].class_name = config["class_name"]
        save_hook = accelerator.register_save_state_pre_hook(save_config )
        load_hook = accelerator.register_load_state_pre_hook(load_config )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1e-3 )
            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks removed
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1e-3 )
            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
def snake_case_ ( self ):
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        dummy_obj = None
        # This should work
        model , optimizer , scheduler , train_dl , valid_dl , dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
self.assertTrue(dummy_obj is None )
def snake_case_ ( self ):
a_ : str = Accelerator()
a_ , a_ , a_ , a_ , a_ : Optional[Any] = create_components()
a_ : Optional[int] = [1, 2, 3]
# This should work
a_ , a_ , a_ , a_ , a_ , a_ : Optional[int] = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
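    # The tests below exercise accelerator.prepare() on 8-bit (bitsandbytes) models; they assume a CUDA setup with bitsandbytes installed.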
@slow
@require_bnb
def snake_case_ ( self ):
from transformers import AutoModelForCausalLM
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_8bit=True , device_map={"": 0} , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
@slow
@require_bnb
def snake_case_ ( self ):
from transformers import AutoModelForCausalLM
        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , device_map=device_map , load_in_8bit=True , llm_int8_enable_fp32_cpu_offload=True )
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
@slow
@require_bnb
@require_multi_gpu
def snake_case_ ( self ):
from transformers import AutoModelForCausalLM
        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def snake_case_ ( self ):
from transformers import AutoModelForCausalLM
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        device_map = infer_auto_device_map(model )
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
@require_cuda
def snake_case_ ( self ):
        model = torch.nn.Linear(1_0 , 1_0 )
        sgd = torch.optim.SGD(model.parameters() , lr=0.01 )
        accelerator = Accelerator(cpu=True )
        sgd = accelerator.prepare(sgd ) | 237 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class DonutFeatureExtractor ( DonutImageProcessor ):
    def __init__( self: List[Any] , *args: str , **kwargs: Tuple) ->None:
        '''simple docstring'''
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
| 685 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__a : Tuple = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
__a : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
__a : Optional[Any] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n"
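# Plain NumPy/SciPy/scikit-learn scorers shared by the GLUE subsets handled below.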
def simple_accuracy(preds , labels ):
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_fa(preds , labels ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds , labels ):
    """simple docstring"""
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info ( self : int ):
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute ( self : str , predictions : Optional[Any] , references : Any ):
        """simple docstring"""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 637 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def analyze_directory ( self: Any , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ) ->Optional[Any]:
        '''simple docstring'''
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory , file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing" , file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers , module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures) , 0)
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory , identifier=identifier)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory , identifier=identifier)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory , n_identifier=n_identifiers)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False)
| 685 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__magic_name__ : Tuple = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def UpperCamelCase( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def UpperCamelCase( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
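# A minimal usage sketch (hypothetical local paths; assumes vocab/merges files exist on disk):
#     tokenizer = BlenderbotSmallTokenizerFast(vocab_file="vocab.json", merges_file="merges.txt")
#     ids = tokenizer("sample text")["input_ids"]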
| 672 |
'''simple docstring'''
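# Project Euler 6: with sum_of_squares = n(n + 1)(2n + 1)/6 and square_of_sum = (n(n + 1)/2)**2,
# e.g. solution(10) = 3025 - 385 = 2640.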
def solution (n = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 0 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( SchedulerCommonTest ):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config ( self , **A_ ) -> List[Any]:
"""simple docstring"""
        config = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**A_ )
return config
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
def lowercase_ ( self ) -> str:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=A_ )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: str = self.scheduler_classes[0]
_lowercase: Any = self.get_scheduler_config()
_lowercase: Any = scheduler_class(**A_ )
_lowercase: Optional[int] = 1
scheduler.set_timesteps(A_ )
_lowercase: Union[str, Any] = scheduler.timesteps
_lowercase: str = torch.manual_seed(0 )
_lowercase: Tuple = self.dummy_model()
_lowercase: Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(A_ ):
# 1. scale model input
_lowercase: Union[str, Any] = scheduler.scale_model_input(A_ , A_ )
# 2. predict noise residual
_lowercase: List[Any] = model(A_ , A_ )
# 3. predict previous sample x_t-1
_lowercase: int = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
_lowercase: Dict = pred_prev_sample
_lowercase: Optional[int] = torch.sum(torch.abs(A_ ) )
_lowercase: Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: int = self.scheduler_classes[0]
_lowercase: str = self.get_scheduler_config()
_lowercase: Dict = scheduler_class(**A_ )
_lowercase: Union[str, Any] = [106, 0]
scheduler.set_timesteps(timesteps=A_ )
_lowercase: Union[str, Any] = scheduler.timesteps
_lowercase: List[Any] = torch.manual_seed(0 )
_lowercase: List[str] = self.dummy_model()
_lowercase: List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_lowercase: Dict = scheduler.scale_model_input(A_ , A_ )
# 2. predict noise residual
_lowercase: int = model(A_ , A_ )
# 3. predict previous sample x_t-1
_lowercase: str = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
_lowercase: Optional[int] = pred_prev_sample
_lowercase: int = torch.sum(torch.abs(A_ ) )
_lowercase: Union[str, Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
def lowercase_ ( self ) -> str:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 353 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( SchedulerCommonTest ):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
    def get_scheduler_config ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
        config = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
    def _lowerCAmelCase ( self: Any , time_step: Tuple=0 , **kwargs: Any) ->Any:
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps" , None)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
    def _lowerCAmelCase ( self: Union[str, Any] , time_step: str=0 , **kwargs: Union[str, Any]) ->Tuple:
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps" , None)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop ( self: Dict , **a: int) ->Any:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**a)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample , t)
            sample = scheduler.step_prk(residual , t , sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample , t)
            sample = scheduler.step_plms(residual , t , sample).prev_sample
        return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps" , None)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 0 |
"""simple docstring"""
import qiskit
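# Build a one-qubit/one-bit circuit, measure it, and return the shot histogram from the Aer simulator.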
def single_qubit_measure( qubits, classical_bits ) ->qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=10_00 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 575 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 0 |
"""simple docstring"""
def solution ( n : int = 1_00 ):
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"{solution() = }") | 200 |
'''simple docstring'''
def solution (lowercase__ = 1000 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 ,lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
'''do_convert_rgb''': True,
}
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __magic_name__( self ):
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : str = self.get_rust_tokenizer()
lowerCAmelCase__ : int = self.get_image_processor()
lowerCAmelCase__ : Optional[int] = ChineseCLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = ChineseCLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : str = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCAmelCase__ : List[str] = self.get_image_processor(do_normalize=__UpperCAmelCase )
lowerCAmelCase__ : str = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.get_image_processor()
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase__ : int = ChineseCLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ : str = self.prepare_image_inputs()
lowerCAmelCase__ : Optional[int] = image_processor(__UpperCAmelCase , return_tensors='''np''' )
lowerCAmelCase__ : Tuple = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.get_image_processor()
lowerCAmelCase__ : Dict = self.get_tokenizer()
lowerCAmelCase__ : Optional[int] = ChineseCLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCAmelCase__ : Dict = processor(text=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.get_image_processor()
lowerCAmelCase__ : int = self.get_tokenizer()
lowerCAmelCase__ : Optional[int] = ChineseCLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = '''Alexandra,T-shirt的价格是15便士。'''
lowerCAmelCase__ : List[str] = self.prepare_image_inputs()
lowerCAmelCase__ : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.get_image_processor()
lowerCAmelCase__ : List[str] = self.get_tokenizer()
lowerCAmelCase__ : List[str] = ChineseCLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ : List[Any] = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Dict = self.get_image_processor()
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = '''Alexandra,T-shirt的价格是15便士。'''
lowerCAmelCase__ : Any = self.prepare_image_inputs()
lowerCAmelCase__ : int = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 678 |
'''simple docstring'''
import math
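# Odd-only sieve of Eratosthenes, then a sweep over consecutive prime squares summing "semidivisible" numbers (Project Euler 234).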
def prime_sieve (n ) -> list:
    '''simple docstring'''
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,n ,2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution (limit = 999966663333 ) -> int:
    '''simple docstring'''
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 685 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
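# floats_list: nested lists of random floats standing in for waveforms in the feature-extraction tests below.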
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """Create nested lists of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=1 , padding_value=0.0 , sampling_rate=1_60_00 , return_attention_mask=True , do_normalize=True , ) -> List[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict ( self ) -> int:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common ( self , equal_length=False , numpify=False ) -> Optional[Any]:
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_ ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor
    def setUp ( self ) -> str:
        """simple docstring"""
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCamelCase = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCamelCase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
_UpperCamelCase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test batched
_UpperCamelCase = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors="np" ).input_values
_UpperCamelCase = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
_UpperCamelCase = np.asarray(lowerCamelCase_ )
_UpperCamelCase = feat_extract(lowerCamelCase_ , return_tensors="np" ).input_values
_UpperCamelCase = feat_extract(lowerCamelCase_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
@require_torch
def lowercase ( self ) -> Any:
"""simple docstring"""
import torch
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase = np.random.rand(1_00 ).astype(np.floataa )
_UpperCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCamelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_UpperCamelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def _load_datasamples ( self , num_samples ) -> List[Any]:
        """simple docstring"""
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
@require_torch
def lowercase ( self ) -> str:
"""simple docstring"""
_UpperCamelCase = torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] )
# fmt: on
_UpperCamelCase = self._load_datasamples(1 )
_UpperCamelCase = ASTFeatureExtractor()
_UpperCamelCase = feature_extractor(lowerCamelCase_ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 10_24, 1_28) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase_ , atol=1E-4 ) )
| 147 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
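# Each converter below copies an S3PRL downstream head's weights into the matching attributes of the HF model.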
def convert_classification (base_model_name ,hf_config ,downstream_dict ) -> Any:
    '''simple docstring'''
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name ,config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak the downstream model's weights to the transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
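
# Example invocation of the converter above; the script filename and the
# checkpoint paths are hypothetical placeholders — adjust them to your setup.
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model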
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
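
# Usage sketch for the torch branch above (requires network access; the
# pipeline id is illustrative):
#
#   from diffusers.models import UNet2DConditionModel
#
#   unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")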
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> & new answer tokens as per new context"""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    # consecutive windows overlap by doc_stride tokens

    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
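
    # A minimal sketch of the strided windowing used in get_strided_contexts_and_ans:
    # windows advance by (MAX_LENGTH - DOC_STRIDE), so consecutive chunks overlap.
    # The question length and token count below are illustrative.
    q_len, n_tokens = 16, 10_000
    starts = range(q_len, n_tokens, MAX_LENGTH - DOC_STRIDE)
    windows = [(i, min(i + MAX_LENGTH - q_len, n_tokens)) for i in starts]
    print(windows[:3])  # [(16, 4096), (2064, 6144), (4112, 8192)]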
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
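
# Usage sketch (requires network access; the checkpoint id comes from the
# pretrained map above):
#
#   from transformers import GPTNeoXJapaneseTokenizer
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   print(tokenizer.tokenize("こんにちは"))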
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> tuple[np.ndarray, np.ndarray]:
_lowerCamelCase , _lowerCamelCase = np.shape(lowercase__ )
if rows != columns:
_lowerCamelCase = (
'''\'table\' has to be of square shaped array but got a '''
F"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(lowercase__ )
_lowerCamelCase = np.zeros((rows, columns) )
_lowerCamelCase = np.zeros((rows, columns) )
for i in range(lowercase__ ):
for j in range(lowercase__ ):
_lowerCamelCase = sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_lowerCamelCase = (table[i][j] - total) / upper[j][j]
_lowerCamelCase = 1
for j in range(lowercase__ , lowercase__ ):
_lowerCamelCase = sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) )
_lowerCamelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
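
    # Quick usage check on an illustrative 3x3 matrix (the values are an
    # assumption for demonstration): the factors should reconstruct the input.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)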
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
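
# Standalone sketch of the integration check above (requires network access to
# download the checkpoint; the ids and shapes come from the test itself):
#
#   import torch
#   from transformers import FlaubertModel
#
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
#   with torch.no_grad():
#       hidden = model(input_ids)[0]
#   print(hidden.shape)  # torch.Size([1, 11, 768])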
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime using trial division in O(sqrt(n))."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
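
    # Sanity check for the 6k +/- 1 optimisation used in is_prime: every prime
    # greater than 3 is congruent to 1 or 5 modulo 6.
    assert all(is_prime(p) and p % 6 in (1, 5) for p in (5, 7, 11, 13, 101, 10007))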
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def preprocess ( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
    """simple docstring"""
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    # NOTE: the default_to_square values below are an assumption, following the
    # CLIP-style processors this method mirrors.
    size = get_size_dict(size , param_name="""size""" , default_to_square=False )
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
    images = make_list_of_images(images )
    if not valid_images(images ):
        raise ValueError(
            """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
            """torch.Tensor, tf.Tensor or jax.ndarray.""" )
    if do_resize and size is None:
        raise ValueError("""Size must be specified if do_resize is True.""" )
    if do_center_crop and crop_size is None:
        raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
    if do_rescale and rescale_factor is None:
        raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
    # PIL RGBA images are converted to RGB
    if do_convert_rgb:
        images = [convert_to_rgb(image ) for image in images]
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image ) for image in images]
    if do_resize:
        images = [self.resize(image=image , size=size , resample=resample ) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image , size=crop_size ) for image in images]
    if do_rescale:
        images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
    if do_normalize:
        images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
    images = [to_channel_dimension_format(image , data_format ) for image in images]
    data = {"""pixel_values""": images}
    return BatchFeature(data=data , tensor_type=return_tensors )
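# Illustrative usage sketch (added; not part of the original file). It assumes the
# methods above belong to a CLIP-style image processor class; the class name and
# file name below are placeholders, not real identifiers from this module.
# from PIL import Image
# processor = MyImageProcessor()  # hypothetical concrete class exposing `preprocess`
# image = Image.open("cat.png").convert("RGB")
# batch = processor.preprocess(image, return_tensors="pt")
# print(batch["pixel_values"].shape)  # (1, 3, crop_height, crop_width)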
| 637 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig ( PretrainedConfig ):
    model_type = '''visual_bert'''
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , visual_embedding_dim=5_12 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , bypass_transformer=False , special_visual_initialize=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
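# Usage sketch (added for illustration; relies on the `VisualBertConfig` name
# restored above):
# config = VisualBertConfig(visual_embedding_dim=1_024, num_hidden_layers=6)
# print(config.model_type)  # visual_bert
# config.save_pretrained("./visualbert-config")  # writes config.json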
| 685 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , ):
_snake_case = size if size is not None else {"shortest_edge": 18}
_snake_case = crop_size if crop_size is not None else {"height": 18, "width": 18}
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = min_resolution
_snake_case = max_resolution
_snake_case = do_resize
_snake_case = size
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_normalize
_snake_case = image_mean
_snake_case = image_std
def UpperCamelCase( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase( self ):
_snake_case = LevitImageProcessingTester(self )
@property
def UpperCamelCase( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase( self ):
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def UpperCamelCase( self ):
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_snake_case = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase( self ):
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_snake_case = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase( self ):
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_snake_case = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 672 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra (grid ,source ,destination ,allow_diagonal ,) -> tuple[float | int, list[tuple[int, int]]]:
    '''simple docstring'''
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) ,np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) ,dtype=object )
    predecessors[source] = None
    while queue:
        (dist , (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source ) # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue ,(dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
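# Worked example (added; not in the original file). Cells equal to 1 are walkable:
# import numpy as np
# grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
# dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
# print(dist)  # 4.0
# print(path)  # [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]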
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
"""simple docstring"""
def dodecahedron_surface_area ( edge ):
    """simple docstring"""
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError('''Length must be positive.''' )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume ( edge ):
    """simple docstring"""
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError('''Length must be positive.''' )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
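# Quick numeric check (added for illustration), using edge length 5:
# print(dodecahedron_surface_area(5))  # ~516.14
# print(dodecahedron_volume(5))        # ~957.89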
| 353 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint (checkpoint_path ,metadata_path ,entity_vocab_path ,pytorch_dump_folder_path ,model_size ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def load_original_entity_vocab (entity_vocab_path ) -> Any:
    '''simple docstring'''
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = F"""{language}:{entity_name}"""
            new_mapping[new_entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
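# Example invocation (added; assumes the script is saved under its usual name and
# all paths are placeholders):
# python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path mluke/pytorch_model.bin \
#     --metadata_path mluke/metadata.json \
#     --entity_vocab_path mluke/entity_vocab.jsonl \
#     --pytorch_dump_folder_path converted-mluke-base \
#     --model_size base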
| 685 | 0 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( lowercase_ , lowercase_ ):
"""simple docstring"""
__UpperCAmelCase : Any = "pixel_values"
__UpperCAmelCase : int = False
__UpperCAmelCase : int = TimmBackboneConfig
def __init__( self : Union[str, Any] , lowercase__ : Union[str, Any] , **lowercase__ : Tuple ):
requires_backends(self , "timm" )
super().__init__(lowercase__ )
__lowercase : List[Any] = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
if hasattr(lowercase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
__lowercase : Union[str, Any] = getattr(lowercase__ , "use_pretrained_backbone" , lowercase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
__lowercase : Tuple = config.out_indices if getattr(lowercase__ , "out_indices" , lowercase__ ) is not None else (-1,)
__lowercase : Union[str, Any] = timm.create_model(
config.backbone , pretrained=lowercase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowercase__ , **lowercase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowercase : Union[str, Any] = self._backbone.return_layers
__lowercase : List[str] = {layer["module"]: str(lowercase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowercase__ )
@classmethod
def snake_case ( cls : Tuple , lowercase__ : Optional[Any] , *lowercase__ : Optional[Any] , **lowercase__ : str ):
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
__lowercase : Optional[Any] = kwargs.pop("config" , TimmBackboneConfig() )
__lowercase : Tuple = kwargs.pop("use_timm_backbone" , lowercase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
__lowercase : Any = kwargs.pop("num_channels" , config.num_channels )
__lowercase : int = kwargs.pop("features_only" , config.features_only )
__lowercase : Dict = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
__lowercase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
__lowercase : int = TimmBackboneConfig(
backbone=lowercase__ , num_channels=lowercase__ , features_only=lowercase__ , use_pretrained_backbone=lowercase__ , out_indices=lowercase__ , )
return super()._from_config(lowercase__ , **lowercase__ )
def snake_case ( self : Optional[Any] , lowercase__ : Optional[int] ):
pass
def snake_case ( self : Tuple , lowercase__ : List[Any] , lowercase__ : Any=None , lowercase__ : Dict=None , lowercase__ : Optional[int]=None , **lowercase__ : int ):
__lowercase : Any = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase : List[str] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowercase : Any = self._all_layers
__lowercase : Tuple = self._backbone(lowercase__ , **lowercase__ )
__lowercase : int = self._return_layers
__lowercase : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowercase : Optional[Any] = self._backbone(lowercase__ , **lowercase__ )
__lowercase : Tuple = None
__lowercase : Dict = tuple(lowercase__ )
__lowercase : List[Any] = tuple(lowercase__ ) if hidden_states is not None else None
if not return_dict:
__lowercase : Dict = (feature_maps,)
if output_hidden_states:
__lowercase : Any = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowercase__ , hidden_states=lowercase__ , attentions=lowercase__ )
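# Hedged usage sketch (added): assuming this backbone is exposed as `TimmBackbone`
# by the installed transformers version, it can wrap a timm model; the model name
# and shapes are illustrative only.
# import torch
# from transformers import TimmBackbone
# backbone = TimmBackbone.from_pretrained("resnet18", use_timm_backbone=True)
# outputs = backbone(torch.randn(1, 3, 224, 224))
# print([fm.shape for fm in outputs.feature_maps])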
| 575 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 0 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance ( x: float , y: float , max_step: int ):
    a = x
    b = y
    for step in range(max_step ): # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb ( distance: float ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def get_color_coded_rgb ( distance: float ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image ( image_width: int = 8_00 , image_height: int = 6_00 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 50 , use_distance_color_coding: bool = True , ):
    img = Image.new("""RGB""" , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
    img.show()
| 200 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_cpmant'] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
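# Illustrative note (added): with the `_LazyModule` indirection above, an import like
# the following resolves lazily, so torch is only required once a torch-backed symbol
# is actually accessed:
# from transformers.models.cpmant import CpmAntConfig, CpmAntTokenizer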
| 685 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig ( PretrainedConfig ):
    model_type = 'albert'
    def __init__( self , vocab_size=3_0000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=1_6384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
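# Usage sketch (added; relies on the `AlbertConfig`/`AlbertOnnxConfig` names restored
# above):
# config = AlbertConfig()
# onnx_config = AlbertOnnxConfig(config, task="multiple-choice")
# print(dict(onnx_config.inputs))  # each input maps to {0: 'batch', 1: 'choice', 2: 'sequence'}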
| 678 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number (phone: str ) -> bool:
    '''simple docstring'''
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern ,phone ) )
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone))
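# A few more illustrative checks (added):
# print(is_sri_lankan_phone_number("+94773283048"))  # True
# print(is_sri_lankan_phone_number("0718382399"))    # True
# print(is_sri_lankan_phone_number("075469322"))     # False (subscriber part too short)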
| 685 | 0 |
def check_bouncy ( n: int ) -> bool:
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(n )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution ( percent: float = 99 ) -> int:
    """simple docstring"""
    if not 0 < percent < 1_00:
        raise ValueError("solution() only accepts values from 0 to 100" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_00 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(9_9)}''')
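# Worked illustration (added): 1344 has non-decreasing digits, so it is not bouncy,
# while 155349 both rises and falls and therefore is:
# print(check_bouncy(1344))    # False
# print(check_bouncy(155349))  # True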
| 147 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping (fname ,overwrite = False ):
    '''simple docstring'''
    with open(fname ,"r" ,encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks ,key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname ,"w" ,encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings (overwrite = False ):
    '''simple docstring'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE ,f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname ,overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames ,diffs ) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix"""
            " this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
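# Example invocations (added; assumes the script lives at utils/sort_auto_mappings.py
# in a transformers checkout):
# python utils/sort_auto_mappings.py               # re-sort the auto mappings in place
# python utils/sort_auto_mappings.py --check_only  # raise if any mapping is unsorted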
| 685 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self : Any ):
__snake_case = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
__snake_case = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(snake_case_ )
from datasets import load_dataset
__snake_case = load_dataset("nielsr/rvlcdip-demo" )
__snake_case = dataset["train"][0]["image"].convert("RGB" )
__snake_case = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
# forward pass
with torch.no_grad():
__snake_case = model(**snake_case_ )
__snake_case = outputs.logits
__snake_case = torch.Size((1, 16) )
self.assertEqual(logits.shape , snake_case_ )
__snake_case = torch.tensor(
[-0.4158, -0.4092, -0.4347] , device=snake_case_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case_ , atol=1e-4 ) )
| 163 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
| 685 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
"""processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vision_text_dual_encoder"""] = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_vision_text_dual_encoder"""] = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vision_text_dual_encoder"""] = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 133 |
'''simple docstring'''
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class FixedPriorityQueue :
    def __init__( self ) -> None:
        '''simple docstring'''
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority: int , data: int ) -> None:
        '''simple docstring'''
        try:
            if len(self.queues[priority]) >= 1_00:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue( self ) -> int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class ElementPriorityQueue :
    def __init__( self ) -> None:
        '''simple docstring'''
        self.queue = []
    def enqueue( self , data: int ) -> None:
        '''simple docstring'''
        if len(self.queue) == 1_00:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue( self ) -> int:
        '''simple docstring'''
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
def fixed_priority_queue () -> None:
    '''simple docstring'''
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue () -> None:
    '''simple docstring'''
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 0 |
"""simple docstring"""
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))
def generate_key( message: str , key: str ) -> str:
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text( message: str , key_new: str ) -> str:
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text
def original_text( cipher_text: str , key_new: str ) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt
def main( ) -> None:
    message = '''THE GERMAN ATTACK'''
    key = '''SECRET'''
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(F"""Encrypted Text = {s}""" )
    print(F"""Original Text = {original_text(s , key_new )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
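# Worked illustration (added): the key is cycled out to the message length before
# encryption:
# print(generate_key("THE GERMAN ATTACK", "SECRET"))  # SECRETSECRETSECRE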
| 661 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
a_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
a_ = Dataset.from_dict(lowercase__ )
return dataset
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = get_dataset()
a_ = make_duplicate_clusters(a , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = get_dataset()
a_ , a_ = deduplicate_dataset(a)
self.assertEqual(len(a) , 2)
print(a)
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a)
| 685 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE_ = """\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=8 ) -> str:
a_ : str = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
a_ : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class snake_case_ ( lowercase_ ):
def __init__( self , a_ , a_ , a_ , ):
super().__init__()
self.register_modules(
unet=a_ , scheduler=a_ , movq=a_ , )
a_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case_ ( self , a_ , a_ , a_ , a_ , a_ , a_ ):
if latents is None:
a_ : List[Any] = randn_tensor(a_ , generator=a_ , device=a_ , dtype=a_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
a_ : Tuple = latents.to(a_ )
a_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def snake_case_ ( self , a_=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
a_ : Dict = torch.device(F"""cuda:{gpu_id}""" )
a_ : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a_ , a_ )
def snake_case_ ( self , a_=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
a_ : Dict = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a_ : Optional[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
a_ , a_ : List[Any] = cpu_offload_with_hook(a_ , a_ , prev_module_hook=a_ )
# We'll offload the last model manually.
a_ : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case_ ( self ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a_ )
def __call__( self , a_ , a_ , a_ = 5_1_2 , a_ = 5_1_2 , a_ = 1_0_0 , a_ = 4.0 , a_ = 1 , a_ = None , a_ = None , a_ = "pil" , a_ = True , ):
a_ : List[Any] = self._execution_device
a_ : List[Any] = guidance_scale > 1.0
if isinstance(a_ , a_ ):
a_ : Tuple = torch.cat(a_ , dim=0 )
a_ : List[str] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(a_ , a_ ):
a_ : Union[str, Any] = torch.cat(a_ , dim=0 )
if do_classifier_free_guidance:
a_ : Union[str, Any] = image_embeds.repeat_interleave(a_ , dim=0 )
a_ : Tuple = negative_image_embeds.repeat_interleave(a_ , dim=0 )
a_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a_ )
self.scheduler.set_timesteps(a_ , device=a_ )
a_ : Any = self.scheduler.timesteps
a_ : List[Any] = self.unet.config.in_channels
a_ , a_ : Optional[Any] = downscale_height_and_width(a_ , a_ , self.movq_scale_factor )
# create initial latent
a_ : Optional[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , a_ , a_ , a_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(a_ ) ):
# expand the latents if we are doing classifier free guidance
a_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a_ : Tuple = {"image_embeds": image_embeds}
a_ : Tuple = self.unet(
sample=a_ , timestep=a_ , encoder_hidden_states=a_ , added_cond_kwargs=a_ , return_dict=a_ , )[0]
if do_classifier_free_guidance:
a_ , a_ : str = noise_pred.split(latents.shape[1] , dim=1 )
a_ , a_ : str = noise_pred.chunk(2 )
a_ , a_ : Union[str, Any] = variance_pred.chunk(2 )
a_ : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a_ : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a_ , a_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
a_ : int = self.scheduler.step(
a_ , a_ , a_ , generator=a_ , )[0]
# post-processing
a_ : Dict = self.movq.decode(a_ , force_not_quantize=a_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
a_ : Tuple = image * 0.5 + 0.5
a_ : Optional[int] = image.clamp(0 , 1 )
a_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a_ : Union[str, Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=a_ )
| 237 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
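# Migration sketch (added): since the class above only warns and delegates, new code
# can use the image processor directly; the checkpoint name is illustrative.
# from transformers import DonutImageProcessor
# image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")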
| 685 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase ( lowercase_ ):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
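
# The slice-and-tolerance assertion above is the usual regression-test pattern for
# pretrained checkpoints: compare a small corner of the output tensor against
# hard-coded reference values with an absolute tolerance (a sketch, assuming
# `output` and `expected_slice` are tensors):
#
#   assert torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)
#
# which keeps the test fast while still catching numerical drift in the weights.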
| 637 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
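
# A minimal standalone sketch of the same doctest mechanism (the module picked here is
# illustrative, any transformers submodule with docstring examples works):
#
#   import doctest, unittest
#   import transformers
#
#   suite = doctest.DocTestSuite(transformers.models.bert.configuration_bert)
#   result = unittest.TextTestRunner().run(suite)
#   assert len(result.failures) == 0
#
# DocTestSuite collects every docstring example in the module and runs each one as a
# unittest case, which is exactly what analyze_directory does file by file.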
| 685 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
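
# The same helpers work interactively outside pytest (dataset ids are examples taken
# from the parametrizations above; outputs shown are the expected values from those
# test cases):
#
#   from datasets import get_dataset_config_names, get_dataset_split_names
#
#   get_dataset_config_names("paws")
#   # ['labeled_final', 'labeled_swap', 'unlabeled_final']
#   get_dataset_split_names("squad", "plain_text")
#   # ['train', 'validation']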
| 672 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'{solution() = }')
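    # Worked check (n = 10): the sum of squares is 385 and the square of the sum
    # is 55**2 = 3025, so the difference is 3025 - 385 = 2640.
    assert solution(10) == 2640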
| 685 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,  # name reconstructed from the unlabeled default value 32
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 353 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
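
# A minimal sketch of driving PNDMScheduler outside the test harness (the model call
# and shapes are placeholders, not part of this test file):
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = model(sample, t)  # your denoising model
#       sample = scheduler.step(noise_pred, t, sample).prev_sample
#
# `step` dispatches first to the Runge-Kutta warm-up updates and then to the linear
# multistep updates, which is why the tests above exercise `step_prk` and `step_plms`
# separately.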
| 685 | 0 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
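
# A minimal usage sketch (the checkpoint id is an example): every model re-exported
# above shares the ModelMixin loading API, e.g.
#
#   from diffusers.models import AutoencoderKL
#   vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")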
| 575 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
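
# Note on the replicate/shard pattern used above (explanatory, not part of the
# original test): `replicate` copies the pytree of weights to every local device,
# while `shard` splits the batch dimension across devices so that the pmapped
# (`jit=True`) pipeline call runs one slice of the batch per device:
#
#   p_params = replicate(params)      # same weights on all devices
#   prompt_ids = shard(prompt_ids)    # leading axis split across devices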
| 685 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # Pre-fill the output with the padding value; entity-span padding values are
    # pairs, hence the extra trailing dimension.
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here if we still had ragged labels.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
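
# A minimal construction sketch (the tokenizer id and `dataset` are assumptions for
# illustration); the collator is meant to be handed to a PyTorch DataLoader:
#
#   from transformers import AutoTokenizer
#   from torch.utils.data import DataLoader
#
#   tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer, padding="longest")
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collator)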
'''simple docstring'''
def solution(limit: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `limit`."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'{solution() = }')
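    # Worked check (limit = 10): the multiples of 3 or 5 below 10 are 3, 5, 6 and 9,
    # which sum to 23.
    assert solution(10) == 23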
| 685 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi from the fraction of uniform random points in the unit square
    that land inside the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: average the function at random points and scale by
    the interval length."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        # The integral of sqrt(4 - x^2) from 0 to 2 is exactly pi.
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
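
    # Example runs (sample counts are arbitrary): Monte Carlo error shrinks roughly
    # like 1/sqrt(iterations), so expect about two correct digits at 10**4 samples.
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)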
| 678 |
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    """Return all primes below `n` using a sieve over the odd numbers."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999966663333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
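
# How the loop works (explanatory note): for consecutive primes p < q, every n with
# p**2 < n < q**2 has lps(n) = p and ups(n) = q, so each window first sums the
# multiples of p and of q separately, then subtracts the multiples of p*q twice,
# because those were divisible by both and must not be counted at all.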
| 685 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 147 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
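# Illustrative invocation (all paths and the model name below are placeholders, not taken from this file):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model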
| 685 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'
def __init__( self : Any , snake_case_ : int=21128 , snake_case_ : Tuple=768 , snake_case_ : Any=12 , snake_case_ : Tuple=12 , snake_case_ : Tuple=3072 , snake_case_ : Any="gelu" , snake_case_ : Any=0.1 , snake_case_ : int=0.1 , snake_case_ : Optional[int]=512 , snake_case_ : Any=64 , snake_case_ : Optional[Any]=2 , snake_case_ : str=0.02 , snake_case_ : Union[str, Any]=1e-12 , snake_case_ : str=0.1 , snake_case_ : Tuple=0 , snake_case_ : Dict=2 , snake_case_ : Any=3 , snake_case_ : Union[str, Any]=True , **snake_case_ : int , ):
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = max_relative_position
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = classifier_dropout
__snake_case = use_cache
| 163 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 685 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__snake_case =logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
def __init__( self : Optional[Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> None:
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 133 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    '''simple docstring'''
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        '''simple docstring'''
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
    def clean_text(self, content):
        '''simple docstring'''
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        '''simple docstring'''
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
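        # e.g. (illustrative): "£" (UTF-8 C2 A3) falls in the first two-byte range above
        # and would map to <KIGOU>; "Ⅰ" (U+2160, UTF-8 E2 85 A0) satisfies checkuae and
        # would map to <U2000U2BFF> -- assuming neither string is in the vocabulary.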
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
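    # Byte fallback sketch (illustrative): a character absent from the vocab decomposes
    # into its UTF-8 byte tokens, e.g. "é" (0xC3 0xA9) -> ["<|byte195|>", "<|byte169|>"].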
    def convert_id_to_token(self, index, breakline="\n"):
        '''simple docstring'''
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 685 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def snake_case__ ( self , **lowerCamelCase__ ):
        config = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase__ )
return config
def snake_case__ ( self , lowerCamelCase__=0 , **lowerCamelCase__ ):
        kwargs = dict(self.forward_default_kwargs )
_lowerCamelCase = kwargs.pop('''num_inference_steps''' , lowerCamelCase__ )
_lowerCamelCase = self.dummy_sample
_lowerCamelCase = 0.1 * sample
_lowerCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase = self.get_scheduler_config(**lowerCamelCase__ )
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
_lowerCamelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
_lowerCamelCase = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
_lowerCamelCase = dummy_past_residuals[:]
_lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
_lowerCamelCase = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
_lowerCamelCase = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
pass
def snake_case__ ( self , lowerCamelCase__=0 , **lowerCamelCase__ ):
        kwargs = dict(self.forward_default_kwargs )
_lowerCamelCase = kwargs.pop('''num_inference_steps''' , lowerCamelCase__ )
_lowerCamelCase = self.dummy_sample
_lowerCamelCase = 0.1 * sample
_lowerCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCamelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
_lowerCamelCase = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
_lowerCamelCase = dummy_past_residuals[:]
_lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
_lowerCamelCase = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
_lowerCamelCase = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case__ ( self , **lowerCamelCase__ ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config(**lowerCamelCase__ )
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
_lowerCamelCase = 1_0
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
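    # (illustrative note) full_loop above mirrors PNDM inference: the Runge-Kutta
    # warm-up steps (prk_timesteps) run first, then the linear multistep phase
    # (plms_timesteps), which is why the denoising loop is split in two.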
def snake_case__ ( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
for scheduler_class in self.scheduler_classes:
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
_lowerCamelCase = self.dummy_sample
_lowerCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ , '''set_timesteps''' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ , '''set_timesteps''' ):
_lowerCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
_lowerCamelCase = dummy_past_residuals[:]
_lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
_lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
_lowerCamelCase = scheduler.step_plms(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case__ ( self ):
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def snake_case__ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def snake_case__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ )
def snake_case__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def snake_case__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def snake_case__ ( self ):
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=lowerCamelCase__ )
def snake_case__ ( self ):
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = 2_7
for scheduler_class in self.scheduler_classes:
_lowerCamelCase = self.dummy_sample
_lowerCamelCase = 0.1 * sample
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowerCamelCase = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
def snake_case__ ( self ):
with self.assertRaises(lowerCamelCase__ ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def snake_case__ ( self ):
_lowerCamelCase = self.full_loop()
_lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
_lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def snake_case__ ( self ):
_lowerCamelCase = self.full_loop(prediction_type='''v_prediction''' )
_lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
_lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def snake_case__ ( self ):
        _lowerCamelCase = self.full_loop(set_alpha_to_one=True , beta_start=0.0_1 )
_lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
_lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def snake_case__ ( self ):
        _lowerCamelCase = self.full_loop(set_alpha_to_one=False , beta_start=0.0_1 )
_lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
_lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 661 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 | 0 |
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
a_ : Optional[int] = current_set.copy()
for row_index, row in enumerate(lowercase__ ):
a_ : int = row[0]
for column_index, column in enumerate(lowercase__ ):
if magnitude == 0:
a_ : str = column
continue
a_ : Dict = column / magnitude
# Subtract to cancel term
a_ : Dict = current_set[0]
a_ : List[str] = [first_row]
a_ : Optional[Any] = current_set[1::]
for row in current_set:
a_ : Tuple = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowercase__ )
continue
for column_index in range(len(lowercase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowercase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
a_ : int = final_set[0]
a_ : Dict = []
a_ : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
a_ : List[Any] = simplify(lowercase__ )
for i in range(len(lowercase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, lowercase__ )
a_ : Optional[Any] = resultant
return final_set
def solve_simultaneous(equations: list[list]) -> list:
if len(lowercase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
a_ : Optional[int] = len(lowercase__ ) + 1
if any(len(lowercase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowercase__, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowercase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
a_ : Optional[int] = equations.copy()
if any(0 in row for row in data_set ):
a_ : Union[str, Any] = data_set.copy()
a_ : int = []
for row_index, row in enumerate(lowercase__ ):
if 0 not in row:
a_ : Union[str, Any] = data_set.pop(lowercase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, lowercase__ )
a_ : int = data_set.copy()
a_ : Tuple = simplify(lowercase__ )
a_ : Any = simplified[::-1]
a_ : List[Any] = []
for row in simplified:
a_ : Optional[Any] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
a_ : Any = row.copy()[: len(lowercase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowercase__ ) == 0:
solutions.append(0 )
continue
a_ : str = temp_row[1::]
a_ : Any = temp_row[::-1]
for column_index, column in enumerate(lowercase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowercase__ )
a_ : Optional[int] = []
for item in solutions:
final.append(float(round(lowercase__, 5 ) ) )
return final[::-1]
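# Worked example (illustrative; assumes the two functions above keep their original
# simplify/solve_simultaneous names): in the 5x5 system printed below, row i encodes
# x_i + (x_1 + ... + x_5) = b_i with b = (4, 5, 6, 7, 8); summing all rows gives
# 6 * sum(x) = 30, so sum(x) = 5 and the solution is [-1.0, 0.0, 1.0, 2.0, 3.0],
# while [[4, 2]] reduces to 4x = 2, i.e. [0.5].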
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 237 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    '''simple docstring'''
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
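# Quick check of the 6k +/- 1 fact used in is_prime (illustrative):
# 5 = 6*1 - 1, 7 = 6*1 + 1, 13 = 6*2 + 1; the converse fails, e.g. 25 = 6*4 + 1
# yet is_prime(25) is False, which is why the trial division above is still needed.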
| 685 | 0 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """simple docstring"""
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
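# Example (illustrative): electric_power(voltage=0, current=2, power=4) solves for
# the missing quantity via V = P / I and returns result(name='voltage', value=2.0).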
if __name__ == "__main__":
import doctest
doctest.testmod()
| 637 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
    model_type = '''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
| 685 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
'''simple docstring'''
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def is_chinese(word: str):
'''simple docstring'''
for char in word:
_snake_case = ord(lowercase__ )
if not _is_chinese_char(lowercase__ ):
return 0
return 1
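# e.g. (illustrative): every codepoint of "中国" lies in the CJK ranges above, so
# is_chinese("中国") returns 1, while any Latin letter makes the check return 0.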
def get_chinese_word(tokens: List[str]):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_removed = min(end - start, max_word_len)
            for i in range(max_removed, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
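# Illustrative example: with chinese_word_set = {"中国"}, the tokens
# ["中", "国", "人"] become ["中", "##国", "人"], restoring the word boundary that
# whole-word masking needs.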
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    '''simple docstring'''
    ltp_res = []
    for i in range(0, len(lines), 1_00):
        res = ltp_tokenizer.seg(lines[i : i + 1_00])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 1_00):
        res = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=True, truncation=True, max_length=5_12)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
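# (illustrative) each entry of ref_ids lists, for one line, the positions of
# "##"-prefixed sub-tokens that sit inside a Chinese word -- the format consumed
# as a chinese_ref file by whole-word-masking data collators.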
def main(args):
    '''simple docstring'''
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
| 672 |
'''simple docstring'''
from __future__ import annotations

from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    '''simple docstring'''
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
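# Illustrative usage: on np.array([[1, 1], [1, 1]]) with source (0, 0),
# destination (1, 1) and allow_diagonal=True, dijkstra returns
# (1.0, [(0, 0), (1, 1)]) -- one diagonal step through walkable cells (value 1).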
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_0_0

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition(number_to_partition: int) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret = set()
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
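# For example (illustrative): partition(7) == {7, 10, 12}, the products of the
# prime sums 7, 2 + 5 (2 * 5 = 10) and 2 + 2 + 3 (2 * 2 * 3 = 12).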
def solution(number_unique_partitions: int = 5_000):
    """simple docstring"""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 353 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def load_original_entity_vocab(entity_vocab_path):
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
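# Illustrative entity_vocab line: {"id": 7, "entities": [["Japan", "en"], ["日本", "ja"]]}
# produces keys "en:Japan" and "ja:日本" both mapped to 7; special tokens such as
# "[MASK]" keep their bare names.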
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 685 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case__ ( ) ->str:
"""simple docstring"""
__lowercase : List[str] = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores", type=lowercase__, default=1, help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script", type=lowercase__, help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
), )
# rest from the training program
parser.add_argument("training_script_args", nargs=lowercase__ )
return parser.parse_args()
def snake_case__ ( ) ->List[Any]:
"""simple docstring"""
__lowercase : List[Any] = parse_args()
# Import training_script as a module.
__lowercase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__lowercase : Union[str, Any] = script_fpath.stem
    __lowercase : Dict = importlib.import_module(script_fpath.stem )
# Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
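# Hedged usage sketch (the script and argument names below are illustrative,
# derived from the argparse setup above rather than from any documentation):
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 1e-5
#
# Everything after the training-script path is captured by the REMAINDER
# positional and re-injected into sys.argv before xmp.spawn launches one
# process per TPU core.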
| 575 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _lowerCamelCase ( ):
lowercase__ : int = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    lowercase__ : Any = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
return image
def _lowerCamelCase ( lowerCamelCase__ : Optional[int] ):
lowercase__ : Optional[int] = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple ):
lowercase__ : Union[str, Any] = dct.pop(lowercase__ )
lowercase__ : List[str] = val
def _lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase__ : int = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
lowercase__ : int = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
        lowercase__ : Any = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
lowercase__ : Union[str, Any] = qkv_bias
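# Minimal sketch of the bias fusion above, with an illustrative hidden size of
# 4 (real checkpoints use far larger dimensions): timm stores separate q and v
# biases, the k bias is implicitly zero, and HF expects a single fused vector.
#
#     q = torch.ones(4)
#     v = torch.full((4,), 2.0)
#     qkv = torch.cat((q, torch.zeros_like(q), v))
#     assert qkv.shape == (12,)  # [q | zero k | v]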
def _lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : str ):
lowercase__ : Union[str, Any] = 3_64 if """coco""" in model_name else 2_24
lowercase__ : Optional[Any] = BlipaVisionConfig(image_size=lowercase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowercase__ : List[Any] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=lowercase__ ).to_dict()
elif "opt-6.7b" in model_name:
lowercase__ : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=lowercase__ ).to_dict()
elif "t5-xl" in model_name:
lowercase__ : Any = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase__ : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
lowercase__ : Optional[int] = BlipaConfig(vision_config=lowercase__ , text_config=lowercase__ )
return config, image_size
@torch.no_grad()
def _lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : str=None , lowerCamelCase__ : Dict=False ):
lowercase__ : str = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
lowercase__ : Optional[int] = tokenizer("""\n""" , add_special_tokens=lowercase__ ).input_ids[0]
lowercase__ , lowercase__ : Tuple = get_blipa_config(lowercase__ , eos_token_id=lowercase__ )
lowercase__ : Any = BlipaForConditionalGeneration(lowercase__ ).eval()
lowercase__ : Tuple = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
lowercase__ , lowercase__ : str = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
lowercase__ : List[str] = """cuda""" if torch.cuda.is_available() else """cpu"""
lowercase__ , lowercase__ , lowercase__ : Tuple = load_model_and_preprocess(
name=lowercase__ , model_type=lowercase__ , is_eval=lowercase__ , device=lowercase__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
lowercase__ : Optional[Any] = original_model.state_dict()
lowercase__ : Optional[int] = create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase__ : List[str] = state_dict.pop(lowercase__ )
if key.startswith("""Qformer.bert""" ):
lowercase__ : Dict = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
lowercase__ : List[Any] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
lowercase__ : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
lowercase__ : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
lowercase__ : Dict = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
lowercase__ : Dict = key.replace("""t5""" , """language""" )
lowercase__ : Any = val
# read in qv biases
read_in_q_v_bias(lowercase__ , lowercase__ )
lowercase__ , lowercase__ : Tuple = hf_model.load_state_dict(lowercase__ , strict=lowercase__ )
assert len(lowercase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowercase__ : Optional[Any] = load_demo_image()
lowercase__ : Any = vis_processors["""eval"""](lowercase__ ).unsqueeze(0 ).to(lowercase__ )
lowercase__ : Optional[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(lowercase__ )
# create processor
lowercase__ : List[str] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=lowercase__ , image_std=lowercase__ )
lowercase__ : Union[str, Any] = BlipaProcessor(image_processor=lowercase__ , tokenizer=lowercase__ )
lowercase__ : Union[str, Any] = processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values.to(lowercase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase__ , lowercase__ )
original_model.to(lowercase__ )
hf_model.to(lowercase__ )
with torch.no_grad():
if "opt" in model_name:
lowercase__ : Any = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
lowercase__ : Optional[int] = hf_model(lowercase__ , lowercase__ ).logits
else:
lowercase__ : Optional[int] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
lowercase__ : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
lowercase__ : Any = hf_model(lowercase__ , lowercase__ , labels=lowercase__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowercase__ : List[str] = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowercase__ )
assert torch.allclose(logits[0, :3, :3] , lowercase__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowercase__ : Optional[int] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowercase__ )
else:
# cast to same type
lowercase__ : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(lowercase__ ) , lowercase__ , atol=1E-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
lowercase__ : List[str] = """"""
lowercase__ : Optional[Any] = tokenizer(lowercase__ , return_tensors="""pt""" ).input_ids.to(lowercase__ )
lowercase__ : List[str] = original_model.generate({"""image""": original_pixel_values} )
lowercase__ : str = hf_model.generate(
lowercase__ , lowercase__ , do_sample=lowercase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , lowercase__ )
lowercase__ : List[Any] = input_ids.shape[1]
lowercase__ : Optional[Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase__ )
lowercase__ : int = [text.strip() for text in output_text]
print("""HF generation:""" , lowercase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 200 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _lowerCAmelCase ( lowercase_ ):
A__ = 'trajectory_transformer'
A__ = ['past_key_values']
A__ = {
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=100 , __UpperCAmelCase=5 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , __UpperCAmelCase=249 , __UpperCAmelCase=6 , __UpperCAmelCase=17 , __UpperCAmelCase=25 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0006 , __UpperCAmelCase=512 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-12 , __UpperCAmelCase=1 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=5_0256 , __UpperCAmelCase=5_0256 , **__UpperCAmelCase , ):
lowerCAmelCase__ : int = vocab_size
lowerCAmelCase__ : List[str] = action_weight
lowerCAmelCase__ : List[str] = reward_weight
lowerCAmelCase__ : Dict = value_weight
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : Optional[int] = block_size
lowerCAmelCase__ : Tuple = action_dim
lowerCAmelCase__ : Dict = observation_dim
lowerCAmelCase__ : str = transition_dim
lowerCAmelCase__ : Tuple = learning_rate
lowerCAmelCase__ : Any = n_layer
lowerCAmelCase__ : List[str] = n_head
lowerCAmelCase__ : int = n_embd
lowerCAmelCase__ : List[str] = embd_pdrop
lowerCAmelCase__ : Tuple = attn_pdrop
lowerCAmelCase__ : List[Any] = resid_pdrop
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : str = layer_norm_eps
lowerCAmelCase__ : str = kaiming_initializer_range
lowerCAmelCase__ : int = use_cache
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
| 678 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str ) -> bool:
    '''simple docstring'''
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern ,phone ) )
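# Worked example of the pattern above: in "0094702343221", "0094" satisfies the
# 0{2}94 prefix alternative, "70" satisfies 7(0|1|2|4|5|6|7|8), the separator
# group matches the empty string, and the trailing "2343221" supplies the
# required \d{7}, so the function returns True.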
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 685 | 0 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def _lowercase ( ) -> Optional[Any]:
"""simple docstring"""
    data_dict = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
    dataset = Dataset.from_dict(data_dict )
return dataset
class lowerCamelCase_ ( lowercase_ ):
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = get_dataset()
_UpperCamelCase = make_duplicate_clusters(lowerCamelCase_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def lowercase ( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase = get_dataset()
_UpperCamelCase , _UpperCamelCase = deduplicate_dataset(lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , 2 )
print(lowerCamelCase_ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCamelCase_ )
| 147 |
'''simple docstring'''
import argparse
import os
import re
a_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
a_ = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __UpperCAmelCase (lowercase__ ,lowercase__ = False ) -> List[Any]:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.read()
a_ = content.split("\n" )
a_ = []
a_ = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a_ = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a_ = sorted(lowercase__ ,key=lambda lowercase__ : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
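# Worked example of the sort key above: on a mapping entry such as
#     ("albert", "AlbertConfig"),
# _re_identifier.search(...).groups()[0] yields "albert", so blocks end up
# ordered alphabetically by their model-type string.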
def __UpperCAmelCase (lowercase__ = False ) -> Optional[int]:
'''simple docstring'''
a_ = [os.path.join(lowercase__ ,lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(".py" )]
a_ = [sort_auto_mapping(lowercase__ ,overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
a_ = [f for f, d in zip(lowercase__ ,lowercase__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(lowercase__ )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 685 | 0 |
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return x + 2
class __magic_name__ ( unittest.TestCase ):
def lowerCAmelCase ( self : str ):
__snake_case = "x = 3"
__snake_case = {}
__snake_case = evaluate(snake_case_ , {} , state=snake_case_ )
assert result == 3
self.assertDictEqual(snake_case_ , {"x": 3} )
__snake_case = "x = y"
__snake_case = {"y": 5}
__snake_case = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 5, "y": 5} )
def lowerCAmelCase ( self : Optional[Any] ):
__snake_case = "y = add_two(x)"
__snake_case = {"x": 3}
__snake_case = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
__snake_case = evaluate(snake_case_ , {} , state=snake_case_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCAmelCase ( self : Tuple ):
__snake_case = "x = 3"
__snake_case = {}
__snake_case = evaluate(snake_case_ , {} , state=snake_case_ )
assert result == 3
self.assertDictEqual(snake_case_ , {"x": 3} )
def lowerCAmelCase ( self : Union[str, Any] ):
__snake_case = "test_dict = {'x': x, 'y': add_two(x)}"
__snake_case = {"x": 3}
__snake_case = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
self.assertDictEqual(snake_case_ , {"x": 3, "y": 5} )
self.assertDictEqual(snake_case_ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase ( self : List[str] ):
__snake_case = "x = 3\ny = 5"
__snake_case = {}
__snake_case = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 3, "y": 5} )
def lowerCAmelCase ( self : List[str] ):
__snake_case = "text = f'This is x: {x}.'"
__snake_case = {"x": 3}
__snake_case = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(snake_case_ , {"x": 3, "text": "This is x: 3."} )
def lowerCAmelCase ( self : str ):
__snake_case = "if x <= 3:\n y = 2\nelse:\n y = 5"
__snake_case = {"x": 3}
__snake_case = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(snake_case_ , {"x": 3, "y": 2} )
__snake_case = {"x": 8}
__snake_case = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 8, "y": 5} )
def lowerCAmelCase ( self : Union[str, Any] ):
__snake_case = "test_list = [x, add_two(x)]"
__snake_case = {"x": 3}
__snake_case = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
self.assertListEqual(snake_case_ , [3, 5] )
self.assertDictEqual(snake_case_ , {"x": 3, "test_list": [3, 5]} )
def lowerCAmelCase ( self : Optional[int] ):
__snake_case = "y = x"
__snake_case = {"x": 3}
__snake_case = evaluate(snake_case_ , {} , state=snake_case_ )
assert result == 3
self.assertDictEqual(snake_case_ , {"x": 3, "y": 3} )
def lowerCAmelCase ( self : Any ):
__snake_case = "test_list = [x, add_two(x)]\ntest_list[1]"
__snake_case = {"x": 3}
__snake_case = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 3, "test_list": [3, 5]} )
__snake_case = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
__snake_case = {"x": 3}
__snake_case = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase ( self : Tuple ):
__snake_case = "x = 0\nfor i in range(3):\n x = i"
__snake_case = {}
__snake_case = evaluate(snake_case_ , {"range": range} , state=snake_case_ )
assert result == 2
self.assertDictEqual(snake_case_ , {"x": 2, "i": 2} )
| 163 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
| 685 | 0 |
'''simple docstring'''
def is_pentagonal(n: int ) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
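# Worked check of the closed form above: 22 is the 4th pentagonal number, and
# (1 + (24 * 22 + 1) ** 0.5) / 6 == 24 / 6 == 4.0 exactly, while 23 gives the
# non-integer (1 + 553 ** 0.5) / 6.
assert is_pentagonal(22) and not is_pentagonal(23)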
def solution(limit: int = 5000 ) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 133 |
'''simple docstring'''
class OverFlowError(Exception ):
    pass
class UnderFlowError(Exception ):
    pass
class FixedPriorityQueue:
def __init__( self: Optional[Any]) ->List[str]:
'''simple docstring'''
a_ = [
[],
[],
[],
]
def _lowerCAmelCase ( self: Dict , a: int , a: int) ->None:
'''simple docstring'''
try:
if len(self.queues[priority]) >= 1_00:
raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
def __init__( self: Any) ->List[str]:
'''simple docstring'''
a_ = []
def _lowerCAmelCase ( self: int , a: int) ->None:
'''simple docstring'''
if len(self.queue) == 1_00:
raise OverFlowError("Maximum queue size is 100")
self.queue.append(a)
def _lowerCAmelCase ( self: List[str]) ->int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
a_ = min(self.queue)
self.queue.remove(a)
return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
def __UpperCAmelCase () -> Union[str, Any]:
'''simple docstring'''
a_ = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase () -> List[Any]:
'''simple docstring'''
a_ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 0 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ = 1_0_1 ):
_lowerCamelCase = length
def __len__( self ):
return self.length
def __getitem__( self , lowerCamelCase__ ):
return i
class DummyDataCollator:
'''simple docstring'''
def __call__( self , lowerCamelCase__ ):
return {"input_ids": torch.tensor(lowerCamelCase__ ), "labels": torch.tensor(lowerCamelCase__ )}
class DummyModel(nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_lowerCamelCase = nn.Linear(1_2_0 , 8_0 )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class lowerCamelCase_( lowercase_ ):
'''simple docstring'''
@require_torch_neuroncore
def snake_case__ ( self ):
_lowerCamelCase = F"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
_lowerCamelCase = self.get_auto_remove_tmp_dir()
_lowerCamelCase = F"""--output_dir {output_dir}""".split()
_lowerCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowerCamelCase_( lowercase_ ):
'''simple docstring'''
@require_torch_multi_gpu
def snake_case__ ( self ):
_lowerCamelCase = F"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
_lowerCamelCase = self.get_auto_remove_tmp_dir()
_lowerCamelCase = F"""--output_dir {output_dir}""".split()
_lowerCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__SCREAMING_SNAKE_CASE : Dict = HfArgumentParser((TrainingArguments,))
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
    dataset = DummyDataset(dataset_length)
def lowerCAmelCase_( lowercase_ : Tuple ) -> Dict:
_lowerCamelCase = list(range(len(lowercase__ ) ) )
_lowerCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
    trainer = Trainer(
        model=DummyModel(),
        args=training_args,
        data_collator=DummyDataCollator(),
        eval_dataset=dataset,
        compute_metrics=compute_metrics,
    )
    metrics = trainer.evaluate()
    logger.info(metrics)
    if metrics["eval_success"] is not True:
        logger.error(metrics)
        exit(1)
    p = trainer.predict(dataset)
    logger.info(p.metrics)
    if p.metrics["test_success"] is not True:
        logger.error(p.metrics)
        exit(1)
    trainer.args.eval_accumulation_steps = 2
    metrics = trainer.evaluate()
    logger.info(metrics)
    if metrics["eval_success"] is not True:
        logger.error(metrics)
        exit(1)
    p = trainer.predict(dataset)
    logger.info(p.metrics)
    if p.metrics["test_success"] is not True:
        logger.error(p.metrics)
        exit(1)
    trainer.args.eval_accumulation_steps = None
| 661 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
    data_dict = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
    dataset = Dataset.from_dict(data_dict )
return dataset
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = get_dataset()
a_ = make_duplicate_clusters(a , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = get_dataset()
a_ , a_ = deduplicate_dataset(a)
self.assertEqual(len(a) , 2)
print(a)
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a)
| 685 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE_ = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 237 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 | 0 |
def _SCREAMING_SNAKE_CASE ( __lowercase : str = 2_0_0 ) -> int:
"""simple docstring"""
__A = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
__A = [0] * (pence + 1)
__A = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(lowercase__ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
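# Worked example: 5 pence can be formed four ways (5, 2+2+1, 2+1+1+1 and
# 1+1+1+1+1), so the DP above yields solution(5) == 4.
assert solution(5) == 4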
if __name__ == "__main__":
assert solution(200) == 73682
| 637 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
| 685 | 0 |
'''simple docstring'''
from math import log2
def snake_case_ ( a ):
    '''simple docstring'''
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(a , float ):
        raise TypeError("Input value must be a 'int' type" )
    return 0 if (a == 0) else int(log2(a & -a ) )
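# Worked example: 36 == 0b100100, 36 & -36 isolates the lowest set bit
# (4 == 0b100), and int(log2(4)) == 2, the zero-based index of that bit.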
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 100 ) -> int:
'''simple docstring'''
a_ = n * (n + 1) * (2 * n + 1) / 6
a_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
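# Worked example: for n = 10 the sum of the squares is 385 and the square of
# the sum is 55 ** 2 == 3025, so solution(10) == 2640.
assert solution(10) == 2640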
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Any = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
A__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 353 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
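    # For reference: with the "linear" schedule configured above, the betas are
    # spaced linearly from beta_start to beta_end over num_train_timesteps
    # steps; the tests below sweep these knobs via check_over_configs.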
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 0 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool] ) ->list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited )
    order.append(vert )
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool] ) ->list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited )
    return component
def strongly_connected_components(graph: dict[int, list[int]] ) ->list[list[int]]:
    """simple docstring"""
    visited = len(graph ) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph, i, visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited )
            components_list.append(component )
    return components_list
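# Worked example on test_graph_2 above: the cycles 0 -> 1 -> 2 -> 0 and
# 3 -> 4 -> 5 -> 3 give strongly_connected_components(test_graph_2) ==
# [[0, 2, 1], [3, 5, 4]]; member order within each component depends on the
# traversal order.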
| 575 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def _lowerCamelCase ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=82 , lowerCamelCase__ : List[Any]=16 ):
set_seed(42 )
lowercase__ : str = RegressionModel()
lowercase__ : Any = deepcopy(lowercase__ )
lowercase__ : Tuple = RegressionDataset(length=lowercase__ )
lowercase__ : int = DataLoader(lowercase__ , batch_size=lowercase__ )
model.to(accelerator.device )
lowercase__ , lowercase__ : Any = accelerator.prepare(lowercase__ , lowercase__ )
return model, ddp_model, dataloader
def _lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Tuple=False ):
lowercase__ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
lowercase__ : Any = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(lowerCamelCase__ : Union[str, Any] ):
lowercase__ : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
lowercase__ : Dict = dataset.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
lowercase__ : Optional[int] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCamelCase__ : Union[str, Any] ):
if use_longest:
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 )
def _lowerCamelCase ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] ):
lowercase__ : List[Any] = Accelerator(dispatch_batches=lowercase__ , split_batches=lowercase__ )
lowercase__ : Union[str, Any] = get_dataloader(lowercase__ , not dispatch_batches )
lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=lowercase__ )
lowercase__ , lowercase__ : int = accelerator.prepare(lowercase__ , lowercase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _lowerCamelCase ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict ):
lowercase__ : Dict = []
for batch in dataloader:
lowercase__ , lowercase__ : Dict = batch.values()
with torch.no_grad():
lowercase__ : Dict = model(lowercase__ )
lowercase__ , lowercase__ : Tuple = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowercase__ , lowercase__ : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase__ )
targs.append(lowercase__ )
lowercase__ , lowercase__ : List[Any] = torch.cat(lowercase__ ), torch.cat(lowercase__ )
return logits, targs
def _lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Dict=82 , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : List[Any]=16 ):
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_basic_setup(lowercase__ , lowercase__ , lowercase__ )
lowercase__ , lowercase__ : List[Any] = generate_predictions(lowercase__ , lowercase__ , lowercase__ )
assert (
len(lowercase__ ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}'''
def _lowerCamelCase ( lowerCamelCase__ : str = False , lowerCamelCase__ : List[str] = False ):
lowercase__ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
lowercase__ , lowercase__ : List[Any] = get_mrpc_setup(lowercase__ , lowercase__ )
# First do baseline
lowercase__ , lowercase__ , lowercase__ : str = setup["""no"""]
model.to(lowercase__ )
model.eval()
for batch in dataloader:
batch.to(lowercase__ )
with torch.inference_mode():
lowercase__ : Optional[int] = model(**lowercase__ )
lowercase__ : str = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase__ , references=batch["""labels"""] )
lowercase__ : str = metric.compute()
# Then do distributed
lowercase__ , lowercase__ , lowercase__ : List[Any] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowercase__ : Dict = model(**lowercase__ )
lowercase__ : Any = outputs.logits.argmax(dim=-1 )
lowercase__ : Optional[int] = batch["""labels"""]
lowercase__ , lowercase__ : List[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase__ , references=lowercase__ )
lowercase__ : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def _lowerCamelCase ( ):
lowercase__ : str = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(lowercase__ , lowercase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowercase__ : int = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(lowercase__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
lowercase__ : List[Any] = Accelerator()
test_torch_metrics(lowercase__ , 5_12 )
accelerator.state._reset_state()
def _lowerCamelCase ( lowerCamelCase__ : Dict ):
main()
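
# Distilled from the script above: the core `gather_for_metrics` pattern on its
# own (a sketch; the model, dataloader, and metric are assumed to be prepared
# elsewhere by the caller):
#
# def evaluate_with_gather(model, dataloader, metric, accelerator):
#     model.eval()
#     for batch in dataloader:
#         with torch.inference_mode():
#             logits = model(**batch).logits
#         preds = logits.argmax(dim=-1)
#         # gather_for_metrics drops the duplicate samples Accelerate pads onto
#         # the final batch, so each example is counted exactly once.
#         preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
#         metric.add_batch(predictions=preds, references=refs)
#     return metric.compute()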
if __name__ == "__main__":
main() | 200 |
"""Project Euler Problem 1: https://projecteuler.net/problem=1

Find the sum of all the multiples of 3 or 5 below the given limit.
"""


def solution(limit: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `limit`."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
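
# O(1) alternative via inclusion-exclusion (a sketch; `solution_closed_form`
# is a name introduced here, not part of the original solution):
#
# def solution_closed_form(limit: int = 1000) -> int:
#     def multiples_sum(k: int) -> int:
#         # Sum of multiples of k below `limit` is k * (1 + 2 + ... + m)
#         # with m = (limit - 1) // k.
#         m = (limit - 1) // k
#         return k * m * (m + 1) // 2
#
#     # Count multiples of 3 and of 5 once; subtract multiples of 15 counted twice.
#     return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)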
| 685 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 678 |
"""Project Euler Problem 234: https://projecteuler.net/problem=234

Sum the semidivisible numbers not exceeding the given limit.
"""
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return all primes strictly below `n`."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Return the sum of all semidivisible numbers not exceeding `limit`."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
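
# Sanity check using the figure quoted in the Project Euler 234 statement
# (the 92 semidivisible numbers not exceeding 1000 sum to 34825). A sketch to
# run by hand, not a guarantee of this reconstruction:
#
# assert solution(1000) == 34825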
| 685 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> List[Any]:
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer) -> Optional[int]:
        """simple docstring"""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self) -> Optional[Any]:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self) -> Any:
        """simple docstring"""
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self) -> str:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self) -> str:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self) -> str:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self) -> int:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self) -> int:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self) -> Tuple:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self) -> str:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self) -> List[str]:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self) -> Optional[Any]:
        """simple docstring"""
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prophetnet_integration(self) -> List[str]:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self) -> Tuple:
        """simple docstring"""
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self) -> str:
        """simple docstring"""
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self) -> str:
        """simple docstring"""
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self) -> Tuple:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [1_02]
        assert encoded_pair == text + [1_02] + text_2 + [1_02]
| 147 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an S3PRL downstream checkpoint into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
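
# Programmatic equivalent of the CLI above (a sketch; every path below is a
# placeholder, not a value from this script):
#
# convert_s3prl_checkpoint(
#     base_model_name="microsoft/unispeech-sat-base",
#     config_path="./config.json",
#     checkpoint_path="./s3prl_downstream.ckpt",
#     model_dump_path="./converted_model",
# )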
| 685 | 0 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)

            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise the PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 163 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
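
# Typical consumer-side usage against this layout (a sketch; the tiny UNet
# hyperparameters below are illustrative only, not defaults from this package):
#
# from diffusers.models import UNet2DModel
#
# unet = UNet2DModel(
#     sample_size=32,
#     in_channels=3,
#     out_channels=3,
#     layers_per_block=1,
#     block_out_channels=(32, 64),
#     down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#     up_block_types=("AttnUpBlock2D", "UpBlock2D"),
# )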
| 685 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    lowerCamelCase = model_type = '''perceiver'''

    def __init__(
        self,
        num_latents=2_5_6,
        d_latents=1_2_8_0,
        d_model=7_6_8,
        num_blocks=1,
        num_self_attends_per_block=2_6,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_query_residual=True,
        vocab_size=2_6_2,
        max_position_embeddings=2_0_4_8,
        image_size=5_6,
        train_size=[3_6_8, 4_9_6],
        num_frames=1_6,
        audio_samples_per_frame=1_9_2_0,
        samples_per_patch=1_6,
        output_shape=[1, 1_6, 2_2_4, 2_2_4],
        **kwargs,
    ) -> Optional[Any]:
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 4_0,
        image_height: int = 4_0,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('input_ids')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('pixel_values')
            return inputs
        else:
            raise ValueError(
| 133 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs, ) -> str:
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)

    @property
    def vocab_size(self) -> int:
        # self.vocab contains multiple surface forms per word, so the true size is len(raw_vocab)
        return len(self.raw_vocab)

    def get_vocab(self) -> Any:
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text) -> Any:
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token) -> Any:
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji) -> List[Any]:
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self) -> Any:
        return len(self.ids_to_tokens)

    def clean_text(self, content) -> Any:
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False) -> List[str]:
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n") -> str:
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
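
# Quick round-trip sketch (not part of the original module; assumes the
# "abeja/gpt-neox-japanese-2.7b" tokenizer files are downloadable):
if __name__ == "__main__":
    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    ids = tokenizer("こんにちは、世界。")["input_ids"]
    print(ids)
    print(tokenizer.decode(ids))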
| 685 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCamelCase_( lowercase_ ):
'''simple docstring'''
lowercase__ : int = 'Wav2Vec2FeatureExtractor'
lowercase__ : Tuple = 'AutoTokenizer'
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = self.feature_extractor
_lowerCamelCase = False
@classmethod
def snake_case__ ( cls , lowerCamelCase__ , **lowerCamelCase__ ):
try:
return super().from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
except OSError:
warnings.warn(
F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , lowerCamelCase__ , )
_lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = WavaVecaCTCTokenizer.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
return cls(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase__ , **lowerCamelCase__ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_lowerCamelCase = kwargs.pop('''raw_speech''' )
else:
_lowerCamelCase = kwargs.pop('''audio''' , lowerCamelCase__ )
_lowerCamelCase = kwargs.pop('''sampling_rate''' , lowerCamelCase__ )
_lowerCamelCase = kwargs.pop('''text''' , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
_lowerCamelCase = args[0]
_lowerCamelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_lowerCamelCase = self.feature_extractor(lowerCamelCase__ , *lowerCamelCase__ , sampling_rate=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None:
_lowerCamelCase = self.tokenizer(lowerCamelCase__ , **lowerCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCamelCase = encodings['''input_ids''']
return inputs
def snake_case__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = kwargs.pop('''input_features''' , lowerCamelCase__ )
_lowerCamelCase = kwargs.pop('''labels''' , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
_lowerCamelCase = args[0]
_lowerCamelCase = args[1:]
if input_features is not None:
_lowerCamelCase = self.feature_extractor.pad(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
if labels is not None:
_lowerCamelCase = self.tokenizer.pad(lowerCamelCase__ , **lowerCamelCase__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowerCamelCase = labels['''input_ids''']
return input_features
def snake_case__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@contextmanager
def snake_case__ ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
_lowerCamelCase = True
_lowerCamelCase = self.tokenizer
yield
_lowerCamelCase = self.feature_extractor
_lowerCamelCase = False
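# Hedged sketch of the audio/text pairing rule implemented by __call__ above,
# using plain dicts in place of BatchFeature/BatchEncoding objects:
def _merge_audio_and_text(inputs, encodings):
    if inputs is None and encodings is None:
        raise ValueError("You need to specify either an `audio` or `text` input to process.")
    if encodings is None:
        return inputs
    if inputs is None:
        return encodings
    merged = dict(inputs)
    merged["labels"] = encodings["input_ids"]  # tokenized text rides along as labels
    return merged

assert _merge_audio_and_text({"input_values": [0.0]}, {"input_ids": [1, 2]})["labels"] == [1, 2]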
| 661 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
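# The 3x3-slice check above is the usual integration-test pattern: compare a small
# corner of the hidden states against reference values with a loose atol (1e-4)
# rather than pinning down the full (1, 11, 768) output tensor.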
| 685 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = """\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"""
SCREAMING_SNAKE_CASE_ = """\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n"""
SCREAMING_SNAKE_CASE_ = """\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
def snake_case_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def snake_case_ ( self , a_ , a_ , a_=False ):
if return_pvalue:
a_ : Dict = pearsonr(a_ , a_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(a_ , a_ )[0] )} | 237 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
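# A compact, self-contained restatement of the 6k +/- 1 trial division above,
# handy for quick sanity checks (bounds and step mirror the loop in this file):
def _is_prime(number: int) -> bool:
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    return all(
        number % i != 0 and number % (i + 2) != 0
        for i in range(5, int(math.sqrt(number) + 1), 6)
    )

assert [n for n in range(2, 30) if _is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]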
| 685 | 0 |
__a : Union[str, Any] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
__a : Optional[int] = ["a", "b", "c", "d", "e"]
def _SCREAMING_SNAKE_CASE ( __lowercase : List[Any] , __lowercase : Dict , __lowercase : Tuple ) -> str:
"""simple docstring"""
__A = start
# add current to visited
visited.append(lowercase__ )
__A = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__A = topological_sort(lowercase__ , lowercase__ , lowercase__ )
# if all neighbors visited add current to sort
sort.append(lowercase__ )
    # if not all vertices have been visited, select a new one to visit
if len(lowercase__ ) != len(lowercase__ ):
for vertice in vertices:
if vertice not in visited:
__A = topological_sort(lowercase__ , lowercase__ , lowercase__ )
# return sort
return sort
if __name__ == "__main__":
__a : List[str] = topological_sort("a", [], [])
print(sort)
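# Trace sketch for the module-level graph above: each vertex is appended only
# after all of its descendants, so starting from "a" the expected printed
# order is ['c', 'd', 'e', 'b', 'a'].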
| 637 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
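# Hedged usage sketch (standard PretrainedConfig pattern; requires transformers):
#   from transformers import VisualBertConfig
#   cfg = VisualBertConfig()              # defaults follow the signature above
#   cfg.visual_embedding_dim              # -> 512
#   cfg.save_pretrained("./visual_bert")  # writes config.json with these fields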
| 685 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def UpperCamelCase( *lowerCamelCase , **lowerCamelCase ):
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def UpperCamelCase( self ):
_snake_case = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
_snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_snake_case = image_classifier(lowerCamelCase , candidate_labels=["a", "b", "c"] )
        # The floating-point scores are so close that we run into floating-point error, and the order is
        # not guaranteed across python and torch versions.
self.assertIn(
nested_simplify(lowerCamelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
_snake_case = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
] , )
@require_tf
def UpperCamelCase( self ):
_snake_case = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
_snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_snake_case = image_classifier(lowerCamelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
_snake_case = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
{"score": 0.333, "label": ANY(lowerCamelCase )},
],
] , )
@slow
@require_torch
def UpperCamelCase( self ):
_snake_case = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
_snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_snake_case = image_classifier(lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
_snake_case = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def UpperCamelCase( self ):
_snake_case = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
_snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_snake_case = image_classifier(lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
_snake_case = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
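# nested_simplify in the assertions above recursively rounds floats (to three
# decimals by default), which is why hard-coded triples such as 0.511/0.485/0.004
# compare stably against live pipeline output.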
| 672 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
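# Hedged call-shape sketch for the search above (grid cells equal to 1 are
# walkable, each move costs 1, and np.inf means the destination is unreachable):
#   dist, path = search(np.ones((3, 3), dtype=int), (0, 0), (2, 2), False)
#   # -> dist == 4.0 and path holds five cells from (0, 0) to (2, 2)
# where `search` stands in for the function defined in this file.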
| 685 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Union[str, Any] = {}
_lowercase: Union[str, Any] = 2
while True:
_lowercase: Dict = factor_map.pop(lowercase__ , lowercase__ )
if factor:
_lowercase: Optional[Any] = factor + prime
while x in factor_map:
x += factor
_lowercase: Union[str, Any] = factor
else:
_lowercase: List[Any] = prime
yield prime
prime += 1
def _lowerCAmelCase ( _UpperCamelCase = 1e10 ):
"""simple docstring"""
_lowercase: Dict = sieve()
_lowercase: Tuple = 1
while True:
_lowercase: str = next(lowercase__ )
if (2 * prime * n) > limit:
return n
        # Skip the next prime: for even n the remainder is always 2.
next(lowercase__ )
n += 2
if __name__ == "__main__":
print(solution())
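# Why the loop may skip every other prime and count n by 2: modulo p**2,
# (p - 1)**n + (p + 1)**n collapses to 2 for even n and to 2*n*p for odd n,
# so the remainder first exceeds the limit at the smallest odd n with 2*p_n*n > 1e10.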
| 353 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
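# Hedged CLI sketch for the converter above (script name and all paths are placeholders):
#   python convert_mluke_checkpoint.py \
#     --checkpoint_path ./pytorch_model.bin \
#     --metadata_path ./metadata.json \
#     --entity_vocab_path ./entity_vocab.jsonl \
#     --pytorch_dump_folder_path ./mluke-base \
#     --model_size base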
| 685 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Optional[Any] = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase__ ( lowercase_ ):
"""simple docstring"""
__UpperCAmelCase : str = "visual_bert"
def __init__( self : Union[str, Any] , lowercase__ : List[Any]=3_0_5_2_2 , lowercase__ : List[Any]=7_6_8 , lowercase__ : Union[str, Any]=5_1_2 , lowercase__ : List[str]=1_2 , lowercase__ : Tuple=1_2 , lowercase__ : Optional[Any]=3_0_7_2 , lowercase__ : int="gelu" , lowercase__ : Union[str, Any]=0.1 , lowercase__ : int=0.1 , lowercase__ : str=5_1_2 , lowercase__ : Optional[int]=2 , lowercase__ : List[str]=0.0_2 , lowercase__ : Optional[int]=1e-12 , lowercase__ : str=False , lowercase__ : Any=True , lowercase__ : Tuple=1 , lowercase__ : Dict=0 , lowercase__ : Any=2 , **lowercase__ : Optional[Any] , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
__lowercase : str = vocab_size
__lowercase : Optional[Any] = max_position_embeddings
__lowercase : int = hidden_size
__lowercase : Tuple = visual_embedding_dim
__lowercase : List[str] = num_hidden_layers
__lowercase : List[str] = num_attention_heads
__lowercase : Optional[Any] = intermediate_size
__lowercase : Tuple = hidden_act
__lowercase : Optional[Any] = hidden_dropout_prob
__lowercase : int = attention_probs_dropout_prob
__lowercase : Any = initializer_range
__lowercase : Optional[int] = type_vocab_size
__lowercase : Union[str, Any] = layer_norm_eps
__lowercase : Union[str, Any] = bypass_transformer
__lowercase : Optional[int] = special_visual_initialize
| 575 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
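# WordPiece walk-through for the toy vocab written in setUp above:
# "UNwant\u00E9d,running" lowercases and strips accents to "unwanted,running",
# then splits as un + ##want + ##ed + , + runn + ##ing -> ids [7, 4, 5, 10, 8, 9],
# matching the assertions in the first test method.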
| 685 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE ( lowercase_ ):
"""simple docstring"""
_a : Any = '''new-model'''
if is_tf_available():
class _SCREAMING_SNAKE_CASE ( lowercase_ ):
"""simple docstring"""
_a : Dict = NewModelConfig
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__( self ) -> Any:
lowercase__ : List[Any] = """bert-base-cased"""
lowercase__ : List[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Union[str, Any] = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__( self ) -> int:
lowercase__ : List[Any] = """bert-base-cased"""
lowercase__ : List[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__( self ) -> Dict:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Dict = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Dict = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__( self ) -> List[Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Tuple = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__( self ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__( self ) -> Dict:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__( self ) -> Optional[int]:
for model_name in ["bert-base-uncased"]:
lowercase__ : str = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__( self ) -> List[Any]:
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_tensorflow_probability
def UpperCAmelCase__( self ) -> Dict:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowercase__ : Any = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 1_4410 )
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : Dict = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 1_4410 )
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : Any = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : str = copy.deepcopy(model.config )
lowercase__ : Tuple = ["""FunnelBaseModel"""]
lowercase__ : str = TFAutoModel.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
lowercase__ : Any = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Tuple:
try:
AutoConfig.register("""new-model""" , lowerCamelCase__ )
lowercase__ : Any = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase__ : List[str] = BertModelTester(self ).get_config()
lowercase__ : int = NewModelConfig(**tiny_config.to_dict() )
lowercase__ : int = auto_class.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
lowercase__ : Optional[Any] = auto_class.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def UpperCAmelCase__( self ) -> Optional[int]:
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
lowercase__ : Union[str, Any] = TFAutoModel.from_pretrained("""bert-base""" )
def UpperCAmelCase__( self ) -> Dict:
with self.assertRaisesRegex(
lowerCamelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowercase__ : Optional[Any] = TFAutoModel.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def UpperCAmelCase__( self ) -> Optional[Any]:
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
lowercase__ : str = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCAmelCase__( self ) -> List[str]:
with self.assertRaisesRegex(lowerCamelCase__ , """Use `from_pt=True` to load this model""" ):
lowercase__ : Dict = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : List[str] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
lowercase__ : List[Any] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowercase__ : int = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
lowercase__ : Tuple = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
| 200 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
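# The _LazyModule above defers the torch-dependent imports: attribute access such
# as `transformers.models.cpmant.CpmAntModel` triggers the real import only at
# that point, keeping the top-level package import cheap when torch is absent.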
| 685 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p)
        if pow(g, 2, p) == 1:
            continue
        if pow(g, p, p) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 678 |
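For context, a minimal textbook-ElGamal round trip showing how such a key pair is used. The helper names and toy parameters below are illustrative, not part of the module above:

import random


def elgamal_encrypt(m: int, p: int, g: int, h: int) -> tuple[int, int]:
    # Encrypt integer m < p with public key (p, g, h), where h = g**x mod p.
    k = random.randrange(2, p - 1)  # fresh ephemeral key per message
    return pow(g, k, p), (m * pow(h, k, p)) % p


def elgamal_decrypt(c_1: int, c_2: int, p: int, x: int) -> int:
    # Recover m with the private exponent x.
    s = pow(c_1, x, p)  # shared secret g**(k*x) mod p
    return (c_2 * pow(s, -1, p)) % p  # modular inverse via pow(), Python 3.8+


p, g, x = 467, 2, 127  # toy sizes, not secure
h = pow(g, x, p)
c_1, c_2 = elgamal_encrypt(123, p, g, h)
assert elgamal_decrypt(c_1, c_2, p, x) == 123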
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 685 | 0 |
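A few quick checks against the pattern above, with inputs chosen to exercise each prefix alternative:

assert is_sri_lankan_phone_number("0712345678")      # local 07x form
assert is_sri_lankan_phone_number("+94712345678")    # +94 international form
assert is_sri_lankan_phone_number("0094712343221")   # 0094 form
assert not is_sri_lankan_phone_number("0312345678")  # not a 7x mobile prefix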
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1), )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24, )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        first_animations = []
        second_animations = []
        cpu_targs = []

        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 147 |
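For comparison, a minimal manim (community edition) scene built from the same primitives; rendering is assumed to go through `manim -pql file.py MemoryDemo`:

from manim import Create, DOWN, RIGHT, Rectangle, Scene, Text, VGroup, Write


class MemoryDemo(Scene):
    def construct(self):
        cells = VGroup(*[Rectangle(height=0.5, width=0.5) for _ in range(6)])
        cells.arrange(RIGHT, buff=0)  # lay the memory cells out side by side
        label = Text("CPU", font_size=24).next_to(cells, DOWN)
        self.play(Create(cells), Write(label))  # animate both mobjects
        self.wait()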
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 685 | 0 |
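The sorting key in isolation: each mapping block is ordered by the first quoted identifier it contains.

import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
blocks = ['        ("bert", "BertModel"),', '        ("albert", "AlbertModel"),']
print(sorted(blocks, key=lambda b: _re_identifier.search(b).groups()[0]))
# -> the "albert" entry now precedes the "bert" entry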
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output class for the DeepFloyd IF pipelines.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 163 |
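A hedged usage sketch of the text-to-image pipeline exported above. The model id and fp16 settings follow the DeepFloyd IF model card; treat the exact arguments as assumptions:

import torch
from diffusers import IFPipeline

pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()  # the checkpoint is large; offload modules between steps
prompt_embeds, negative_embeds = pipe.encode_prompt("a photo of a corgi")
image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds).images[0]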
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """
        Empty init weights function to ensure compatibility of the class in the library.
        """
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 685 | 0 |
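A short usage sketch. The AutoBackbone route is the documented entry point for timm backbones; the checkpoint name and flags below are assumptions:

import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, use_pretrained_backbone=True)
pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
# outputs.feature_maps is a tuple with one tensor per requested out_index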
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 133 |
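A quick check of the n = 1 branch: there z is simply the reduced sum x + y, which the standard library reproduces directly.

from fractions import Fraction

x, y = Fraction(1, 2), Fraction(1, 3)
z = x + y
assert (z.numerator, z.denominator) == (5, 6)  # same result as the gcd reduction above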
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """
    A queue with three fixed priority levels; 0 is the highest priority.
    Elements are dequeued from the highest non-empty level first, FIFO
    within a level.
    """

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """
    A queue in which the element itself is the priority: the smallest
    element is always dequeued first.
    """

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
| 685 | 0 |
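The ElementPriorityQueue behaviour maps onto the standard library's heapq, which keeps the smallest element at index 0 (a sketch, not a drop-in replacement, since it skips the size cap):

import heapq

q: list[int] = []
for value in (10, 70, 100, 1, 5):
    heapq.heappush(q, value)
assert heapq.heappop(q) == 1  # smallest element dequeues first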
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )

    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
| 661 |
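The parse_qs fallback above pulls the destination out of a wrapped redirect href; the mechanism in isolation (the href and the query key q here are illustrative, while the script reads the url key from Google's markup):

from urllib.parse import parse_qs, urlparse

wrapped = "/url?q=https://example.com/&sa=U"
target = parse_qs(urlparse(wrapped).query)["q"][0]
assert target == "https://example.com/"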
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self) -> None:
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self) -> None:
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 685 | 0 |
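The 0.85 passed to make_duplicate_clusters is a Jaccard-similarity threshold; computed directly on the fixture's token sets:

def jaccard(a: set, b: set) -> float:
    return len(a & b) / len(a | b)

doc_1 = set(("a " * 20).split())
doc_2 = set(("a " * 30).split())
assert jaccard(doc_1, doc_2) == 1.0  # identical token sets, hence one duplicate cluster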
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass(frozen=lowercase_ )
class snake_case_ :
__lowerCAmelCase = 4_2
__lowerCAmelCase = 4_2
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
@dataclass(frozen=lowercase_ )
class snake_case_ :
__lowerCAmelCase = 4_2
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class snake_case_ ( lowercase_ ):
__lowerCAmelCase = 4_2
def __init__( self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ):
a_ : int = hans_processors[task]()
a_ : Dict = os.path.join(
a_ , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , )
a_ : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a_ , a_ : Optional[int] = label_list[2], label_list[1]
a_ : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a_ : Any = cached_features_file + ".lock"
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
a_ : Dict = torch.load(a_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
a_ : Optional[Any] = (
processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
)
logger.info("Training examples: %s" , len(a_ ) )
a_ : List[Any] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
logger.info("Saving features into cached file %s" , a_ )
torch.save(self.features , a_ )
def __len__( self ):
return len(self.features )
def __getitem__( self , a_ ):
return self.features[i]
def snake_case_ ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class snake_case_ :
__lowerCAmelCase = 4_2
def __init__( self , a_ , a_ , a_ , a_ = 1_2_8 , a_=False , a_ = False , ):
a_ : Any = hans_processors[task]()
a_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a_ , a_ : Optional[int] = label_list[2], label_list[1]
a_ : Union[str, Any] = label_list
a_ : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
a_ : int = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(a_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
a_ : List[str] = tf.data.Dataset.from_generator(
a_ , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def snake_case_ ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self , a_ ):
return self.features[i]
def snake_case_ ( self ):
return self.label_list
class snake_case_ ( lowercase_ ):
def snake_case_ ( self , a_ ):
return self._create_examples(self._read_tsv(os.path.join(a_ , "heuristics_train_set.txt" ) ) , "train" )
def snake_case_ ( self , a_ ):
return self._create_examples(self._read_tsv(os.path.join(a_ , "heuristics_evaluation_set.txt" ) ) , "dev" )
def snake_case_ ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self , a_ , a_ ):
a_ : Optional[Any] = []
for i, line in enumerate(a_ ):
if i == 0:
continue
a_ : Tuple = "%s-%s" % (set_type, line[0])
a_ : Any = line[5]
a_ : Any = line[6]
a_ : Optional[Any] = line[7][2:] if line[7].startswith("ex" ) else line[7]
a_ : Optional[int] = line[0]
examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) )
return examples
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, ) -> str:
a_ : str = {label: i for i, label in enumerate(lowercase__ )}
a_ : int = []
for ex_index, example in tqdm.tqdm(enumerate(lowercase__ ), desc="convert examples to features" ):
if ex_index % 10_000 == 0:
logger.info("Writing example %d" % (ex_index) )
a_ : Optional[Any] = tokenizer(
example.text_a, example.text_b, add_special_tokens=lowercase__, max_length=lowercase__, padding="max_length", truncation=lowercase__, return_overflowing_tokens=lowercase__, )
a_ : Any = label_map[example.label] if example.label in label_map else 0
a_ : Optional[Any] = int(example.pairID )
features.append(InputFeatures(**lowercase__, label=lowercase__, pairID=lowercase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE_ = {
"""hans""": 3,
}
SCREAMING_SNAKE_CASE_ = {
"""hans""": HansProcessor,
} | 237 |
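The feature conversion above hinges on the tokenizer's sentence-pair call; a standalone sketch with an assumed checkpoint:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
enc = tokenizer(
    "The doctor visited the lawyer.",  # premise (text_a)
    "The lawyer visited the doctor.",  # hypothesis (text_b)
    padding="max_length", truncation=True, max_length=128,
)
# enc["input_ids"], enc["attention_mask"], enc["token_type_ids"] populate InputFeatures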
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 685 | 0 |
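The same deprecation-shim pattern in isolation (the class names here are illustrative):

import warnings


class LegacyExtractor(dict):  # stand-in base class for the sketch
    def __init__(self, *args, **kwargs):
        warnings.warn("LegacyExtractor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)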
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any=3 , UpperCamelCase_ : str=32 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : str=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : List[Any]=[1, 1, 2, 1] , UpperCamelCase_ : int=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : Any=3 , UpperCamelCase_ : int=None , ):
"""simple docstring"""
__A = parent
__A = batch_size
__A = image_size
__A = num_channels
__A = embeddings_size
__A = hidden_sizes
__A = depths
__A = is_training
__A = use_labels
__A = hidden_act
__A = num_labels
__A = scope
__A = len(UpperCamelCase_ )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.num_labels )
__A = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] ):
"""simple docstring"""
__A = TFRegNetModel(config=UpperCamelCase_ )
__A = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase_ ( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] ):
"""simple docstring"""
__A = self.num_labels
__A = TFRegNetForImageClassification(UpperCamelCase_ )
__A = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __lowercase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
SCREAMING_SNAKE_CASE = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
__A = TFRegNetModelTester(self )
__A = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(UpperCamelCase_ )
__A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Any ):
__A = model_class(UpperCamelCase_ )
__A = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) , training=UpperCamelCase_ )
__A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__A = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__A = layer_type
__A = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : int={} ):
__A = model(UpperCamelCase_ , return_dict=UpperCamelCase_ , **UpperCamelCase_ )
__A = model(UpperCamelCase_ , return_dict=UpperCamelCase_ , **UpperCamelCase_ ).to_tuple()
def recursive_check(UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ):
if isinstance(UpperCamelCase_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase_ , UpperCamelCase_ ):
recursive_check(UpperCamelCase_ , UpperCamelCase_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(UpperCamelCase_ , UpperCamelCase_ ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(UpperCamelCase_ , UpperCamelCase_ )
for model_class in self.all_model_classes:
__A = model_class(UpperCamelCase_ )
__A = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__A = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
check_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__A = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
__A = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
check_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__A = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__A = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
check_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , {"""output_hidden_states""": True} )
__A = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
__A = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
check_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , {"""output_hidden_states""": True} )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TFRegNetModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
"""simple docstring"""
__A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
__A = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=UpperCamelCase_ , return_tensors="""tf""" )
# forward pass
__A = model(**UpperCamelCase_ , training=UpperCamelCase_ )
# verify the logits
__A = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
__A = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4 )
| 637 |
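tf.debugging.assert_near, used for the logits check at the end of the test above, in isolation:

import tensorflow as tf

tf.debugging.assert_near(
    tf.constant([-0.4180]), tf.constant([-0.4181]), atol=1e-3
)  # passes silently; raises InvalidArgumentError beyond the tolerance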
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self) -> None:
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self) -> None:
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self) -> None:
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doctests(self) -> None:
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self) -> None:
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 685 | 0 |
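A self-contained illustration of what DocTestSuite and testfile execute under the hood:

import doctest

def double(x):
    """
    >>> double(2)
    4
    """
    return 2 * x

assert doctest.testmod().failed == 0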
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return line.startswith(lowercase__ ) or len(lowercase__ ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , lowercase__ ) is not None
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = object_name.split("." )
_snake_case = 0
# First let's find the module where our object lives.
_snake_case = parts[i]
while i < len(lowercase__ ) and not os.path.isfile(os.path.join(lowercase__ , f'''{module}.py''' ) ):
i += 1
if i < len(lowercase__ ):
_snake_case = os.path.join(lowercase__ , parts[i] )
if i >= len(lowercase__ ):
raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(lowercase__ , f'''{module}.py''' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
_snake_case = f.readlines()
# Now let's find the class / func in the code!
_snake_case = ""
_snake_case = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowercase__ ) and re.search(rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowercase__ ):
raise ValueError(f''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_snake_case = line_index
while line_index < len(lowercase__ ) and _should_continue(lines[line_index] , lowercase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_snake_case = lines[start_index:line_index]
return "".join(lowercase__ )
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = code.split("\n" )
_snake_case = 0
while idx < len(lowercase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowercase__ ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = len(get_indent(lowercase__ ) ) > 0
if has_indent:
_snake_case = f'''class Bla:\n{code}'''
_snake_case = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=lowercase__ )
_snake_case = black.format_str(lowercase__ , mode=lowercase__ )
_snake_case , _snake_case = style_docstrings_in_code(lowercase__ )
return result[len("class Bla:\n" ) :] if has_indent else result
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
with open(lowercase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
_snake_case = f.readlines()
_snake_case = []
_snake_case = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowercase__ ):
_snake_case = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_snake_case , _snake_case , _snake_case = search.groups()
_snake_case = find_code_in_diffusers(lowercase__ )
_snake_case = get_indent(lowercase__ )
_snake_case = line_index + 1 if indent == theoretical_indent else line_index + 2
_snake_case = theoretical_indent
_snake_case = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_snake_case = True
while line_index < len(lowercase__ ) and should_continue:
line_index += 1
if line_index >= len(lowercase__ ):
break
_snake_case = lines[line_index]
_snake_case = _should_continue(lowercase__ , lowercase__ ) and re.search(f'''^{indent}# End copy''' , lowercase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_snake_case = lines[start_index:line_index]
_snake_case = "".join(lowercase__ )
# Remove any nested `Copied from` comments to avoid circular copies
_snake_case = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(lowercase__ ) is None]
_snake_case = "\n".join(lowercase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowercase__ ) > 0:
_snake_case = replace_pattern.replace("with" , "" ).split("," )
_snake_case = [_re_replace_pattern.search(lowercase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_snake_case , _snake_case , _snake_case = pattern.groups()
_snake_case = re.sub(lowercase__ , lowercase__ , lowercase__ )
if option.strip() == "all-casing":
_snake_case = re.sub(obja.lower() , obja.lower() , lowercase__ )
_snake_case = re.sub(obja.upper() , obja.upper() , lowercase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_snake_case = blackify(lines[start_index - 1] + theoretical_code )
_snake_case = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_snake_case = lines[:start_index] + [theoretical_code] + lines[line_index:]
_snake_case = start_index + 1
if overwrite and len(lowercase__ ) > 0:
# Warn the user a file has been modified.
print(f'''Detected changes, rewriting {filename}.''' )
with open(lowercase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowercase__ )
return diffs
def snake_case_ ( SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
_snake_case = glob.glob(os.path.join(lowercase__ , "**/*.py" ) , recursive=lowercase__ )
_snake_case = []
for filename in all_files:
_snake_case = is_copy_consistent(lowercase__ , lowercase__ )
diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(lowercase__ ) > 0:
_snake_case = "\n".join(lowercase__ )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 672 |
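The marker the script scans for, exercised on a synthetic line (the path and replace pattern are illustrative):

import re

_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
line = "# Copied from diffusers.models.attention.BasicTransformerBlock with Basic->My"
match = _re_copy_warning.search(line)
assert match is not None
assert match.groups()[1] == "models.attention.BasicTransformerBlock"
assert match.groups()[2].strip() == "with Basic->My"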
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 0 |
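A hand check of the closed forms for n = 10, the classic worked example (3025 - 385 = 2640), with the solution() above in scope:

n = 10
assert sum(range(1, n + 1)) ** 2 == 3025
assert sum(i * i for i in range(1, n + 1)) == 385
assert solution(10) == 2640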