code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case , __snake_case : List[Any] = image.size
__snake_case , __snake_case : Tuple = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
__snake_case : str = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
__snake_case : int = np.array(__lowerCamelCase ).astype(np.floataa ) / 2_5_5.0
__snake_case : Union[str, Any] = image[None].transpose(0 , 3 , 1 , 2 )
__snake_case : Union[str, Any] = torch.from_numpy(__lowerCamelCase )
return 2.0 * image - 1.0
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase : VQModel , lowerCamelCase : UNetaDModel , lowerCamelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> Union[str, Any]:
super().__init__()
self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self : List[str] , lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : Optional[int] = 100 , lowerCamelCase : Optional[float] = 0.0 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(lowerCamelCase , PIL.Image.Image ):
__snake_case : Any = 1
elif isinstance(lowerCamelCase , torch.Tensor ):
__snake_case : Any = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase )}' )
if isinstance(lowerCamelCase , PIL.Image.Image ):
__snake_case : List[Any] = preprocess(lowerCamelCase )
__snake_case , __snake_case : int = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__snake_case : str = (batch_size, self.unet.config.in_channels // 2, height, width)
__snake_case : str = next(self.unet.parameters() ).dtype
__snake_case : Tuple = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
__snake_case : List[Any] = image.to(device=self.device , dtype=lowerCamelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCamelCase , device=self.device )
__snake_case : str = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Union[str, Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : int = {}
if accepts_eta:
__snake_case : List[str] = eta
for t in self.progress_bar(lowerCamelCase ):
# concat latents and low resolution image in the channel dimension.
__snake_case : Union[str, Any] = torch.cat([latents, image] , dim=1 )
__snake_case : Optional[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
__snake_case : int = self.unet(lowerCamelCase , lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Union[str, Any] = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# decode the image latents with the VQVAE
__snake_case : List[Any] = self.vqvae.decode(lowerCamelCase ).sample
__snake_case : Dict = torch.clamp(lowerCamelCase , -1.0 , 1.0 )
__snake_case : Any = image / 2 + 0.5
__snake_case : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
| 81 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 15 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and number_of_steps > 0
), f"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
if number_of_steps == 1:
return 1
UpperCAmelCase_ , UpperCAmelCase_ = 1, 1
for _ in range(number_of_steps - 1 ):
UpperCAmelCase_ , UpperCAmelCase_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Any , _UpperCAmelCase : NestedDataStructureLike[PathLike] , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Optional[Features] = None , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ) -> List[str]:
"""simple docstring"""
super().__init__(
_UpperCAmelCase , split=_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase , data_files=_UpperCAmelCase , features=_UpperCAmelCase , **_UpperCAmelCase , )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , )
lowercase__ = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
| 15 | 0 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
for iter in range(max_iters):
lowerCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = """ylacombe/bark-small"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = """en_speaker_1"""
lowercase__ = """This is a test string"""
lowercase__ = """speaker_embeddings_path.json"""
lowercase__ = """speaker_embeddings"""
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase__ (self : str ) -> Tuple:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase__ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase__ = 35
lowercase__ = 2
lowercase__ = 8
lowercase__ = {
"""semantic_prompt""": np.ones(_UpperCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase__ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
lowercase__ = processor(text=self.input_string )
lowercase__ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 15 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = (EulerDiscreteScheduler,)
_UpperCamelCase : Tuple = 10
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
lowercase = {
'num_train_timesteps': 1100,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**snake_case )
return config
def SCREAMING_SNAKE_CASE__ ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**snake_case )
scheduler.set_timesteps(self.num_inference_steps )
lowercase = torch.manual_seed(0 )
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase = sample.to(snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowercase = scheduler.scale_model_input(snake_case , snake_case )
lowercase = model(snake_case , snake_case )
lowercase = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
lowercase = output.prev_sample
lowercase = torch.sum(torch.abs(snake_case ) )
lowercase = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(prediction_type='v_prediction' )
lowercase = scheduler_class(**snake_case )
scheduler.set_timesteps(self.num_inference_steps )
lowercase = torch.manual_seed(0 )
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase = sample.to(snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowercase = scheduler.scale_model_input(snake_case , snake_case )
lowercase = model(snake_case , snake_case )
lowercase = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
lowercase = output.prev_sample
lowercase = torch.sum(torch.abs(snake_case ) )
lowercase = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_676E-06 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
lowercase = torch.manual_seed(0 )
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowercase = sample.to(snake_case )
for t in scheduler.timesteps:
lowercase = scheduler.scale_model_input(snake_case , snake_case )
lowercase = model(snake_case , snake_case )
lowercase = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
lowercase = output.prev_sample
lowercase = torch.sum(torch.abs(snake_case ) )
lowercase = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**snake_case , use_karras_sigmas=snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
lowercase = torch.manual_seed(0 )
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowercase = sample.to(snake_case )
for t in scheduler.timesteps:
lowercase = scheduler.scale_model_input(snake_case , snake_case )
lowercase = model(snake_case , snake_case )
lowercase = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
lowercase = output.prev_sample
lowercase = torch.sum(torch.abs(snake_case ) )
lowercase = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
| 84 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
A : Optional[Any] = logging.get_logger(__name__)
A : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Optional[int] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : int = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
A : Optional[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
A : Any = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
A : Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A : Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRContextEncoderTokenizer
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRQuestionEncoderTokenizer
A : Any = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A : Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A : int = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(UpperCAmelCase__ )
class A :
'''simple docstring'''
def __call__(self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[bool] = None , **_UpperCAmelCase : Any , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
elif titles is None or texts is None:
lowercase__ = titles if texts is None else texts
return super().__call__(
_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = titles if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [titles]
lowercase__ = texts if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [texts]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = questions if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [questions] * n_passages
assert len(_UpperCAmelCase ) == len(
_UpperCAmelCase ), f'''There should be as many titles than texts but got {len(_UpperCAmelCase )} titles and {len(_UpperCAmelCase )} texts.'''
lowercase__ = super().__call__(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = super().__call__(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_UpperCAmelCase , _UpperCAmelCase )
]
}
if return_attention_mask is not False:
lowercase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ = attention_mask
return self.pad(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : BatchEncoding , _UpperCAmelCase : DPRReaderOutput , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = reader_input["""input_ids"""]
lowercase__ , lowercase__ , lowercase__ = reader_output[:3]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = sorted(range(_UpperCAmelCase ) , reverse=_UpperCAmelCase , key=relevance_logits.__getitem__ )
lowercase__ = []
for doc_id in sorted_docs:
lowercase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ = sequence_ids.index(self.pad_token_id )
else:
lowercase__ = len(_UpperCAmelCase )
lowercase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCAmelCase , top_spans=_UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCAmelCase , start_index=_UpperCAmelCase , end_index=_UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : int , _UpperCAmelCase : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = []
for start_index, start_score in enumerate(_UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase__ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
lowercase__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowercase__ = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class A ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = READER_PRETRAINED_VOCAB_FILES_MAP
A__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = READER_PRETRAINED_INIT_CONFIGURATION
A__ = ['''input_ids''', '''attention_mask''']
A__ = DPRReaderTokenizer
| 15 | 0 |
from __future__ import annotations
def _a ( lowercase__ : list[list[int]] ):
'''simple docstring'''
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(lowercase__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(lowercase__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[int] ) -> list[int]: # This function is recursive
"""simple docstring"""
lowercase__ = len(__magic_name__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ = array[0]
lowercase__ = False
lowercase__ = 1
lowercase__ = []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ = True
lowercase__ = [element for element in array[i:] if element >= array[i]]
lowercase__ = longest_subsequence(__magic_name__ )
if len(__magic_name__ ) > len(__magic_name__ ):
lowercase__ = temp_array
else:
i += 1
lowercase__ = [element for element in array[1:] if element >= pivot]
lowercase__ = [pivot, *longest_subsequence(__magic_name__ )]
if len(__magic_name__ ) > len(__magic_name__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
__a :Dict = get_logger(__name__)
__a :int = Path(__file__).parent / 'model_card_template.md'
__a :int = uuida().hex
__a :Optional[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
__a :List[Any] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
__a :List[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def __snake_case ( __UpperCamelCase : Union[Dict, str, None] = None ):
"""simple docstring"""
A_ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'''; torch/{_torch_version}'''
if is_flax_available():
ua += f'''; jax/{_jax_version}'''
ua += f'''; flax/{_flax_version}'''
if is_onnx_available():
ua += f'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
ua += "; " + user_agent
return ua
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if token is None:
A_ = HfFolder.get_token()
if organization is None:
A_ = whoami(__UpperCamelCase )["name"]
return f'''{username}/{model_id}'''
else:
return f'''{organization}/{model_id}'''
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Dict ):
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(__UpperCamelCase ,"local_rank" ) and args.local_rank not in [-1, 0]:
return
A_ = args.hub_token if hasattr(__UpperCamelCase ,"hub_token" ) else None
A_ = get_full_repo_name(__UpperCamelCase ,token=__UpperCamelCase )
A_ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" ,license="apache-2.0" ,library_name="diffusers" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=__UpperCamelCase ,model_name=__UpperCamelCase ,repo_name=__UpperCamelCase ,dataset_name=args.dataset_name if hasattr(__UpperCamelCase ,"dataset_name" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__UpperCamelCase ,"gradient_accumulation_steps" ) else None
) ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta1" ) else None ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta2" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase ,"adam_weight_decay" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase ,"adam_epsilon" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase ,"lr_scheduler" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase ,"lr_warmup_steps" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase ,"ema_inv_gamma" ) else None ,ema_power=args.ema_power if hasattr(__UpperCamelCase ,"ema_power" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase ,"ema_max_decay" ) else None ,mixed_precision=args.mixed_precision ,)
A_ = os.path.join(args.output_dir ,"README.md" )
model_card.save(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
A_ = str(Path(__UpperCamelCase ).as_posix() )
A_ = re.search(R"snapshots/([^/]+)/" ,__UpperCamelCase )
if search is None:
return None
A_ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
__a :Optional[int] = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
__a :Union[str, Any] = os.path.join(hf_cache_home, 'diffusers')
def __snake_case ( __UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if new_cache_dir is None:
A_ = DIFFUSERS_CACHE
if old_cache_dir is None:
A_ = old_diffusers_cache
A_ = Path(__UpperCamelCase ).expanduser()
A_ = Path(__UpperCamelCase ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
A_ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase )
new_blob_path.parent.mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase )
os.replace(__UpperCamelCase ,__UpperCamelCase )
try:
os.symlink(__UpperCamelCase ,__UpperCamelCase )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
__a :str = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
__a :Dict = 0
else:
with open(cache_version_file) as f:
try:
__a :str = int(f.read())
except ValueError:
__a :List[str] = 0
if cache_version < 1:
__a :Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
__a :List[Any] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if variant is not None:
A_ = weights_name.split("." )
A_ = splits[:-1] + [variant] + splits[-1:]
A_ = ".".join(__UpperCamelCase )
return weights_name
def __snake_case ( __UpperCamelCase : Any ,*,
__UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Tuple=None ,):
"""simple docstring"""
A_ = str(__UpperCamelCase )
if os.path.isfile(__UpperCamelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(__UpperCamelCase ):
if os.path.isfile(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ):
# Load from a PyTorch checkpoint
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ):
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
return model_file
else:
raise EnvironmentError(
f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse("0.20.0" )
):
try:
A_ = hf_hub_download(
__UpperCamelCase ,filename=_add_variant(__UpperCamelCase ,__UpperCamelCase ) ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,)
warnings.warn(
f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__UpperCamelCase ,)
return model_file
except: # noqa: E722
warnings.warn(
f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase ,__UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase ,__UpperCamelCase )}\' so that the correct variant file can be added.''' ,__UpperCamelCase ,)
try:
# 2. Load model file as usual
A_ = hf_hub_download(
__UpperCamelCase ,filename=__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,)
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"this model name. Check the model page at "
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' ) | 86 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A : List[Any] = None
A : Optional[Any] = logging.get_logger(__name__)
A : str = '▁'
A : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
A : Optional[int] = {
'google/pegasus-xsum': 5_1_2,
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = PegasusTokenizer
A__ = ['''input_ids''', '''attention_mask''']
def __init__(self : Tuple , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Union[str, Any]="<pad>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : Optional[Any]="<mask_2>" , _UpperCAmelCase : Optional[Any]="<mask_1>" , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Dict=103 , **_UpperCAmelCase : Dict , ) -> Dict:
"""simple docstring"""
lowercase__ = offset
if additional_special_tokens is not None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_UpperCAmelCase )}, but is'''
f''' {type(_UpperCAmelCase )}''' )
lowercase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_UpperCAmelCase ) , self.offset - 1 )
]
if len(set(_UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , pad_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , mask_token_sent=_UpperCAmelCase , offset=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List , _UpperCAmelCase : Optional[List] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(_UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase__ (self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ (self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
| 15 | 0 |
_lowerCamelCase : Union[str, Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCamelCase : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCamelCase : Dict = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 87 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A : List[str] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
A : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int = CHRF.CHAR_ORDER , _UpperCAmelCase : int = CHRF.WORD_ORDER , _UpperCAmelCase : int = CHRF.BETA , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , ) -> int:
"""simple docstring"""
lowercase__ = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowercase__ = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
lowercase__ = CHRF(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__ = sb_chrf.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 15 | 0 |
"""simple docstring"""
import math
def _snake_case ( __snake_case : float , __snake_case : float ):
"""simple docstring"""
if (
not isinstance(__snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def _snake_case ( __snake_case : float , __snake_case : float ):
"""simple docstring"""
if (
not isinstance(__snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def lowerCamelCase__ (self : Tuple , **_UpperCAmelCase : int ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = """</s>"""
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_UpperCAmelCase ) , 1103 )
def lowerCamelCase__ (self : str ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowercase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowercase__ = """To ensure a smooth flow of bank resolutions."""
lowercase__ = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase__ (self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase , offset=0 , mask_token_sent=_UpperCAmelCase , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Any ) -> str:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Union[str, Any] ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_torch
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowercase__ = self._large_tokenizer(_UpperCAmelCase ).input_ids
self.assertListEqual(
_UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 15 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=3, lowerCamelCase=10, lowerCamelCase=[10, 20, 30, 40], lowerCamelCase=[1, 1, 2, 1], lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=3, lowerCamelCase=None, ) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : Any = batch_size
_lowercase : Tuple = image_size
_lowercase : Union[str, Any] = num_channels
_lowercase : int = embeddings_size
_lowercase : Any = hidden_sizes
_lowercase : str = depths
_lowercase : List[Any] = is_training
_lowercase : Dict = use_labels
_lowercase : List[str] = hidden_act
_lowercase : List[Any] = num_labels
_lowercase : Optional[Any] = scope
_lowercase : Union[str, Any] = len(lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : List[str] = None
if self.use_labels:
_lowercase : str = ids_tensor([self.batch_size], self.num_labels)
_lowercase : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = RegNetModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : List[str] = model(lowerCamelCase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.num_labels
_lowercase : Optional[Any] = RegNetForImageClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : Optional[Any] = config_and_inputs
_lowercase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Optional[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
lowercase_ : Optional[int] = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : Optional[Any] = False
lowercase_ : List[Any] = False
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = RegNetModelTester(self)
_lowercase : Optional[Any] = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds')
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='RegNet does not support input and output embeddings')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Any = model_class(lowerCamelCase)
_lowercase : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Any = [*signature.parameters.keys()]
_lowercase : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[Any] = model_class(config=lowerCamelCase)
for name, module in model.named_modules():
if isinstance(lowerCamelCase, (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
self.assertTrue(
torch.all(module.bias == 0), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase):
_lowercase : str = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Any = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowercase : int = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase), expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowercase : Dict = layer_type
_lowercase : Any = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : str = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> str:
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Optional[Any] = RegNetModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
def UpperCamelCase_( ) -> List[str]:
_lowercase : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(lowerCamelCase)
_lowercase : Dict = self.default_image_processor
_lowercase : Optional[Any] = prepare_img()
_lowercase : List[str] = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Optional[int] = model(**lowerCamelCase)
# verify the logits
_lowercase : List[str] = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape, lowerCamelCase)
_lowercase : Optional[int] = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6]).to(lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4))
| 89 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase__ = load_file(__magic_name__ )
lowercase__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.text_encoder
else:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.unet
# find the target layer
lowercase__ = layer_infos.pop(0 )
while len(__magic_name__ ) > -1:
try:
lowercase__ = curr_layer.__getattr__(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase__ = layer_infos.pop(0 )
elif len(__magic_name__ ) == 0:
break
except Exception:
if len(__magic_name__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase__ = layer_infos.pop(0 )
lowercase__ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(__magic_name__ )
else:
pair_keys.append(__magic_name__ )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase__ = state_dict[pair_keys[0]].to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ )
# update visited list
for item in pair_keys:
visited.append(__magic_name__ )
return pipeline
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
A : str = parser.parse_args()
A : Tuple = args.base_model_path
A : List[str] = args.checkpoint_path
A : Optional[int] = args.dump_path
A : Optional[int] = args.lora_prefix_unet
A : Any = args.lora_prefix_text_encoder
A : Any = args.alpha
A : List[str] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
A : int = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 15 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[Any] = XLMRobertaTokenizer
lowercase__ : Optional[Any] = XLMRobertaTokenizerFast
lowercase__ : Union[str, Any] = True
lowercase__ : List[str] = True
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = '''<pad>'''
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_02 )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase__ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCAmelCase__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase_ , f.name )
lowerCAmelCase__ = XLMRobertaTokenizer(f.name , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = pickle.dumps(lowerCamelCase_ )
pickle.loads(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = '''Hello World!'''
lowerCAmelCase__ = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase__ = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , ) | 90 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Tuple = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''ibert'''
def __init__(self : int , _UpperCAmelCase : Union[str, Any]=3_0522 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[Any]=3072 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=1E-1_2 , _UpperCAmelCase : int=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : int="absolute" , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]="none" , **_UpperCAmelCase : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = quant_mode
lowercase__ = force_dequant
class A ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def lowerCamelCase__ (self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 15 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[str] = BioGptTokenizer
_lowerCamelCase: Tuple = False
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
A = dict(zip(A_ ,range(len(A_ ) ) ) )
A = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file ,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int:
A = 'lower newer'
A = 'lower newer'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = BioGptTokenizer(self.vocab_file ,self.merges_file )
A = 'lower'
A = ['low', 'er</w>']
A = tokenizer.tokenize(A_ )
self.assertListEqual(A_ ,A_ )
A = tokens + ['<unk>']
A = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ )
A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ )
A = tokenizer.build_inputs_with_special_tokens(A_ )
A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a ) | 91 |
from math import log
from scipy.constants import Boltzmann, physical_constants
A : Any = 3_0_0 # TEMPERATURE (unit = K)
def UpperCamelCase ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase : int =get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(UpperCAmelCase__ ) , torch_builtin(UpperCAmelCase__ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCAmelCase__ ) , gelu_new(UpperCAmelCase__ ) ) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase : List[str] =get_activation('''gelu''' )
lowercase : Optional[int] =get_activation('''gelu_10''' )
lowercase : Union[str, Any] =torch_builtin(UpperCAmelCase__ )
lowercase : int =geluaa(UpperCAmelCase__ )
lowercase : Dict =torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCAmelCase__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(UpperCAmelCase__ ):
get_activation('''bogus''' )
with self.assertRaises(UpperCAmelCase__ ):
get_activation(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =get_activation('''gelu''' )
lowercase : Optional[int] =1
lowercase : List[Any] =get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCAmelCase__ ):
lowercase : Tuple =acta.a
| 92 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A :
'''simple docstring'''
A__ = 42
A__ = None
A__ = None
def UpperCamelCase ( ) -> Node | None:
"""simple docstring"""
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(__magic_name__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__magic_name__ , __magic_name__ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(__magic_name__ , __magic_name__ ) )
lowercase__ = 0
return output
def UpperCamelCase ( ) -> None: # Main function for testing.
"""simple docstring"""
lowercase__ = make_tree()
print(f'''In-order Traversal: {inorder(__magic_name__ )}''' )
print(f'''Pre-order Traversal: {preorder(__magic_name__ )}''' )
print(f'''Post-order Traversal: {postorder(__magic_name__ )}''' , """\n""" )
print(f'''Height of Tree: {height(__magic_name__ )}''' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(__magic_name__ ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(__magic_name__ ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(__magic_name__ , level=__magic_name__ ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 15 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = MODEL_FOR_CAUSAL_LM_MAPPING
__magic_name__ :Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ :Union[str, Any] = text_generator('This is a test' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCAmelCase__ :Optional[int] = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCAmelCase__ :Optional[int] = text_generator('This is a test' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'generated_token_ids': ANY(__UpperCAmelCase )},
{'generated_token_ids': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ :Optional[int] = text_generator.model.config.eos_token_id
lowerCAmelCase__ :int = '<pad>'
lowerCAmelCase__ :Union[str, Any] = text_generator(
['This is a test', 'This is a second test'] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'generated_token_ids': ANY(__UpperCAmelCase )},
{'generated_token_ids': ANY(__UpperCAmelCase )},
],
[
{'generated_token_ids': ANY(__UpperCAmelCase )},
{'generated_token_ids': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ :str = text_generator('This is a test' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCAmelCase__ :Union[str, Any] = text_generator(['This is a test', 'This is a second test'] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = 'Hello I believe in'
lowerCAmelCase__ :Any = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ :Union[str, Any] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCAmelCase__ :Optional[int] = text_generator(__UpperCAmelCase , stop_sequence=' fe' )
self.assertEqual(__UpperCAmelCase , [{'generated_text': 'Hello I believe in fe'}] )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = text_generator.model
lowerCAmelCase__ :Dict = text_generator.tokenizer
lowerCAmelCase__ :Any = text_generator('This is a test' )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCAmelCase__ :Dict = text_generator('This is a test' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCAmelCase__ :str = pipeline(task='text-generation' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = text_generator('This is a test' )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCAmelCase__ :Optional[Any] = text_generator('This is a test' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCAmelCase__ :int = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'generated_text': ANY(__UpperCAmelCase )}, {'generated_text': ANY(__UpperCAmelCase )}],
[{'generated_text': ANY(__UpperCAmelCase )}, {'generated_text': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ :Optional[Any] = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'generated_text': ANY(__UpperCAmelCase )}, {'generated_text': ANY(__UpperCAmelCase )}],
[{'generated_text': ANY(__UpperCAmelCase )}, {'generated_text': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :List[Any] = text_generator('test' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :Union[str, Any] = text_generator('test' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :str = text_generator('test' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ :Dict = text_generator('' )
self.assertEqual(__UpperCAmelCase , [{'generated_text': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ :str = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ :int = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 5_0_0 , max_new_tokens=2_0 )
lowerCAmelCase__ :int = text_generator('This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case ( self ):
'''simple docstring'''
import torch
# Classic `model_kwargs`
lowerCAmelCase__ :Optional[int] = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ :Dict = pipe('This is a test' )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowerCAmelCase__ :Union[str, Any] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ :Union[str, Any] = pipe('This is a test' )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ :List[str] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ :Any = pipe('This is a test' )
self.assertEqual(
__UpperCAmelCase , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def snake_case ( self ):
'''simple docstring'''
import torch
lowerCAmelCase__ :List[str] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case ( self ):
'''simple docstring'''
import torch
lowerCAmelCase__ :Union[str, Any] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=__UpperCAmelCase , top_p=0.5 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = 'Hello world'
lowerCAmelCase__ :Optional[Any] = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ :Any = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCAmelCase__ :List[str] = logging.get_logger('transformers.generation.utils' )
lowerCAmelCase__ :Union[str, Any] = 'Both `max_new_tokens`' # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ :Optional[Any] = text_generator(__UpperCAmelCase , max_length=1_0 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ :List[str] = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ :Dict = text_generator(__UpperCAmelCase , max_length=1_0 )
self.assertNotIn(__UpperCAmelCase , cl.out )
| 93 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Tuple = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''poolformer'''
def __init__(self : Dict , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : str=16 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Union[str, Any]=4.0 , _UpperCAmelCase : str=[2, 2, 6, 2] , _UpperCAmelCase : int=[64, 128, 320, 512] , _UpperCAmelCase : Union[str, Any]=[7, 3, 3, 3] , _UpperCAmelCase : List[Any]=[4, 2, 2, 2] , _UpperCAmelCase : Union[str, Any]=[2, 1, 1, 1] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=1E-5 , _UpperCAmelCase : Tuple=0.02 , **_UpperCAmelCase : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = stride
lowercase__ = padding
lowercase__ = pool_size
lowercase__ = hidden_sizes
lowercase__ = mlp_ratio
lowercase__ = depths
lowercase__ = patch_sizes
lowercase__ = strides
lowercase__ = num_encoder_blocks
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_layer_scale
lowercase__ = layer_scale_init_value
lowercase__ = initializer_range
super().__init__(**_UpperCAmelCase )
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = version.parse('''1.11''' )
@property
def lowerCamelCase__ (self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ (self : Dict ) -> float:
"""simple docstring"""
return 2E-3
| 15 | 0 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowercase_ ( __A : Dict , __A : str=1 ) -> Any:
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowercase_ ( __A : Dict , __A : Dict=0 ) -> str:
"""simple docstring"""
lowercase : str =[]
for old_item in old_list:
lowercase : int =old_item.replace('''in_layers.0''' , '''norm1''' )
lowercase : str =new_item.replace('''in_layers.2''' , '''conv1''' )
lowercase : int =new_item.replace('''out_layers.0''' , '''norm2''' )
lowercase : Optional[Any] =new_item.replace('''out_layers.3''' , '''conv2''' )
lowercase : Optional[Any] =new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
lowercase : str =new_item.replace('''skip_connection''' , '''conv_shortcut''' )
lowercase : str =shave_segments(__A , n_shave_prefix_segments=__A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowercase_ ( __A : Any , __A : Dict=0 ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any =[]
for old_item in old_list:
lowercase : List[Any] =old_item
lowercase : int =new_item.replace('''norm.weight''' , '''group_norm.weight''' )
lowercase : List[Any] =new_item.replace('''norm.bias''' , '''group_norm.bias''' )
lowercase : Any =new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
lowercase : Union[str, Any] =new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
lowercase : Union[str, Any] =shave_segments(__A , n_shave_prefix_segments=__A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowercase_ ( __A : Union[str, Any] , __A : Optional[Any] , __A : Union[str, Any] , __A : Optional[Any]=None , __A : List[str]=None , __A : List[str]=None ) -> int:
"""simple docstring"""
assert isinstance(__A , __A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowercase : List[Any] =old_checkpoint[path]
lowercase : List[str] =old_tensor.shape[0] // 3
lowercase : str =(-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowercase : int =old_tensor.shape[0] // config['''num_head_channels'''] // 3
lowercase : int =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowercase , lowercase , lowercase : Any =old_tensor.split(channels // num_heads , dim=1 )
lowercase : Optional[int] =query.reshape(__A )
lowercase : List[str] =key.reshape(__A )
lowercase : Dict =value.reshape(__A )
for path in paths:
lowercase : Optional[int] =path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowercase : Dict =new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
lowercase : Optional[Any] =new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
lowercase : Any =new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
lowercase : Union[str, Any] =new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowercase : List[str] =old_checkpoint[path['''old''']][:, :, 0]
else:
lowercase : int =old_checkpoint[path['''old''']]
def lowercase_ ( __A : List[str] , __A : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[int] ={}
lowercase : Tuple =checkpoint['''time_embed.0.weight''']
lowercase : Dict =checkpoint['''time_embed.0.bias''']
lowercase : str =checkpoint['''time_embed.2.weight''']
lowercase : Tuple =checkpoint['''time_embed.2.bias''']
lowercase : Any =checkpoint['''input_blocks.0.0.weight''']
lowercase : List[str] =checkpoint['''input_blocks.0.0.bias''']
lowercase : Dict =checkpoint['''out.0.weight''']
lowercase : Optional[int] =checkpoint['''out.0.bias''']
lowercase : Union[str, Any] =checkpoint['''out.2.weight''']
lowercase : List[str] =checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
lowercase : List[Any] =len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
lowercase : List[Any] ={
layer_id: [key for key in checkpoint if F'input_blocks.{layer_id}' in key]
for layer_id in range(__A )
}
# Retrieves the keys for the middle blocks only
lowercase : Optional[int] =len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
lowercase : Optional[Any] ={
layer_id: [key for key in checkpoint if F'middle_block.{layer_id}' in key]
for layer_id in range(__A )
}
# Retrieves the keys for the output blocks only
lowercase : int =len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
lowercase : int ={
layer_id: [key for key in checkpoint if F'output_blocks.{layer_id}' in key]
for layer_id in range(__A )
}
for i in range(1 , __A ):
lowercase : Dict =(i - 1) // (config['''num_res_blocks'''] + 1)
lowercase : int =(i - 1) % (config['''num_res_blocks'''] + 1)
lowercase : int =[key for key in input_blocks[i] if F'input_blocks.{i}.0' in key]
lowercase : str =[key for key in input_blocks[i] if F'input_blocks.{i}.1' in key]
if F'input_blocks.{i}.0.op.weight' in checkpoint:
lowercase : Optional[Any] =checkpoint[
F'input_blocks.{i}.0.op.weight'
]
lowercase : Optional[Any] =checkpoint[
F'input_blocks.{i}.0.op.bias'
]
continue
lowercase : Any =renew_resnet_paths(__A )
lowercase : Any ={'''old''': F'input_blocks.{i}.0', '''new''': F'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
lowercase : str ={'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
__A , __A , __A , additional_replacements=[meta_path, resnet_op] , config=__A )
if len(__A ):
lowercase : Tuple =renew_attention_paths(__A )
lowercase : Tuple ={
'''old''': F'input_blocks.{i}.1',
'''new''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
lowercase : Tuple ={
F'input_blocks.{i}.1.qkv.bias': {
'''key''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'input_blocks.{i}.1.qkv.weight': {
'''key''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
__A , __A , __A , additional_replacements=[meta_path] , attention_paths_to_split=__A , config=__A , )
lowercase : Dict =middle_blocks[0]
lowercase : Optional[int] =middle_blocks[1]
lowercase : Tuple =middle_blocks[2]
lowercase : int =renew_resnet_paths(__A )
assign_to_checkpoint(__A , __A , __A , config=__A )
lowercase : Dict =renew_resnet_paths(__A )
assign_to_checkpoint(__A , __A , __A , config=__A )
lowercase : List[Any] =renew_attention_paths(__A )
lowercase : Tuple ={
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
__A , __A , __A , attention_paths_to_split=__A , config=__A )
for i in range(__A ):
lowercase : str =i // (config['''num_res_blocks'''] + 1)
lowercase : Dict =i % (config['''num_res_blocks'''] + 1)
lowercase : Optional[int] =[shave_segments(__A , 2 ) for name in output_blocks[i]]
lowercase : Tuple ={}
for layer in output_block_layers:
lowercase , lowercase : Union[str, Any] =layer.split('''.''' )[0], shave_segments(__A , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__A )
else:
lowercase : List[Any] =[layer_name]
if len(__A ) > 1:
lowercase : Union[str, Any] =[key for key in output_blocks[i] if F'output_blocks.{i}.0' in key]
lowercase : Optional[int] =[key for key in output_blocks[i] if F'output_blocks.{i}.1' in key]
lowercase : List[str] =renew_resnet_paths(__A )
lowercase : List[Any] =renew_resnet_paths(__A )
lowercase : Optional[int] ={'''old''': F'output_blocks.{i}.0', '''new''': F'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(__A , __A , __A , additional_replacements=[meta_path] , config=__A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase : Optional[int] =list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
lowercase : Optional[Any] =checkpoint[
F'output_blocks.{i}.{index}.conv.weight'
]
lowercase : Tuple =checkpoint[
F'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(__A ) == 2:
lowercase : Optional[int] =[]
if len(__A ):
lowercase : List[Any] =renew_attention_paths(__A )
lowercase : int ={
'''old''': F'output_blocks.{i}.1',
'''new''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
lowercase : Tuple ={
F'output_blocks.{i}.1.qkv.bias': {
'''key''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'output_blocks.{i}.1.qkv.weight': {
'''key''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
__A , __A , __A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=__A , )
else:
lowercase : Union[str, Any] =renew_resnet_paths(__A , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase : int ='''.'''.join(['''output_blocks''', str(__A ), path['''old''']] )
lowercase : Dict ='''.'''.join(['''up_blocks''', str(__A ), '''resnets''', str(__A ), path['''new''']] )
lowercase : Tuple =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read())
SCREAMING_SNAKE_CASE = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
SCREAMING_SNAKE_CASE = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
SCREAMING_SNAKE_CASE = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 94 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class A ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
@require_torch
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
lowercase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowercase__ = bertabert.config.encoder.vocab_size
lowercase__ = tokenizer.sep_token_id
lowercase__ = tokenizer.cls_token_id
lowercase__ = 128
lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
lowercase__ = train_dataset.select(range(32 ) )
lowercase__ = val_dataset.select(range(16 ) )
lowercase__ = 4
def _map_to_encoder_decoder_inputs(_UpperCAmelCase : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_UpperCAmelCase , max_length=512 )
lowercase__ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_UpperCAmelCase , max_length=128 )
lowercase__ = inputs.input_ids
lowercase__ = inputs.attention_mask
lowercase__ = outputs.input_ids
lowercase__ = outputs.input_ids.copy()
lowercase__ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
lowercase__ = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_UpperCAmelCase : List[Any] ):
lowercase__ = pred.label_ids
lowercase__ = pred.predictions
# all unnecessary tokens are removed
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowercase__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
lowercase__ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
lowercase__ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy="""steps""" , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
| 15 | 0 |
"""simple docstring"""
class UpperCamelCase_ (__A ):
pass
class UpperCamelCase_ (__A ):
pass
class UpperCamelCase_ :
def __init__( self : List[str] ) -> Tuple:
UpperCAmelCase_ : int = [
[],
[],
[],
]
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> None:
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(lowerCAmelCase_ )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self : int ) -> str:
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class UpperCamelCase_ :
def __init__( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = []
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int ) -> None:
if len(self.queue ) == 100:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
UpperCAmelCase_ : Union[str, Any] = min(self.queue )
self.queue.remove(lowerCAmelCase_ )
return data
def __str__( self : Dict ) -> str:
return str(self.queue )
def snake_case ( ):
UpperCAmelCase_ : Dict = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,1_00 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,1_28 )
print(A__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(A__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def snake_case ( ):
UpperCAmelCase_ : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(A__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(A__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 95 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : Union[str, Any] = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ['ConvNextFeatureExtractor']
A : Optional[Any] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
A : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 15 | 0 |
"""simple docstring"""
def a ( ) -> list[list[int]]:
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
__lowerCamelCase = generate_large_matrix()
__lowerCamelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def a ( __UpperCAmelCase : list[list[int]] ) -> None:
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
def a ( __UpperCAmelCase : list[int] ) -> int:
__magic_name__: Tuple = 0
__magic_name__: Optional[Any] = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__magic_name__: List[str] = (left + right) // 2
__magic_name__: Optional[Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__magic_name__: str = mid + 1
else:
__magic_name__: List[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__UpperCAmelCase )
def a ( __UpperCAmelCase : list[list[int]] ) -> int:
__magic_name__: List[str] = 0
__magic_name__: Optional[int] = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
__magic_name__: Tuple = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def a ( __UpperCAmelCase : list[list[int]] ) -> int:
return len([number for row in grid for number in row if number < 0] )
def a ( __UpperCAmelCase : list[list[int]] ) -> int:
__magic_name__: int = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def a ( ) -> None:
from timeit import timeit
print("""Running benchmarks""" )
__magic_name__: Any = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__magic_name__: Optional[Any] = timeit(f'{func}(grid=grid)' , setup=__UpperCAmelCase , number=5_0_0 )
print(f'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 96 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : List[str]=7 ) -> Dict:
"""simple docstring"""
lowercase__ = None
if token is not None:
lowercase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
lowercase__ = """636036"""
lowercase__ = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
lowercase__ = requests.get(__magic_name__ , headers=__magic_name__ ).json()
return result["workflow_runs"]
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
lowercase__ = get_daily_ci_runs(__magic_name__ )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run["""id"""]
break
return workflow_run_id
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
lowercase__ = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=__magic_name__ , token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ , artifact_url=__magic_name__ , output_dir=__magic_name__ , token=__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(__magic_name__ , f'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
lowercase__ = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
lowercase__ = f.read().decode("""UTF-8""" )
return results
| 15 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 97 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A ( unittest.TestCase ):
'''simple docstring'''
A__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 )
lowercase__ = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
] , )
@require_torch
def lowerCamelCase__ (self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
lowercase__ = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
lowercase__ = pipeline(
"""video-classification""" , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase__ = video_classifier(_UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def lowerCamelCase__ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
| 15 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : Tuple = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowercase__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Optional[Any] = 1_6
A : str = 3_2
def UpperCamelCase ( __magic_name__ : Accelerator , __magic_name__ : int = 16 ) -> List[str]:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__magic_name__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__magic_name__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
__magic_name__ , padding="""longest""" , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
lowercase__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : Union[str, Any] = mocked_dataloaders # noqa: F811
def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __magic_name__ ) == "1":
lowercase__ = 2
# New Code #
lowercase__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__magic_name__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["""lr"""]
lowercase__ = int(config["""num_epochs"""] )
lowercase__ = int(config["""seed"""] )
lowercase__ = int(config["""batch_size"""] )
lowercase__ = evaluate.load("""glue""" , """mrpc""" )
set_seed(__magic_name__ )
lowercase__ , lowercase__ = get_dataloaders(__magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=__magic_name__ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__magic_name__ ):
lowercase__ = model(**__magic_name__ )
lowercase__ = output.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**__magic_name__ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __magic_name__ )
def UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__magic_name__ , default=__magic_name__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__magic_name__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase__ = parser.parse_args()
lowercase__ = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 15 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = XLNetTokenizer
_lowerCamelCase = XLNetTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def snake_case_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = XLNetTokenizer(__A , keep_accents=__A )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self ):
__a = """<s>"""
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def snake_case_ ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(__A ) , 1006 )
def snake_case_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def snake_case_ ( self ):
__a = XLNetTokenizer(__A , keep_accents=__A )
__a = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] )
__a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__a = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__a = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def snake_case_ ( self ):
__a = XLNetTokenizer(__A , do_lower_case=__A )
__a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def snake_case_ ( self ):
__a = XLNetTokenizer(__A , do_lower_case=__A )
__a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def snake_case_ ( self ):
__a = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
__a = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
__a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
__a = tokenizer.build_inputs_with_special_tokens(__A )
__a = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def snake_case_ ( self ):
# fmt: off
__a = {"""input_ids""": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 99 |
import os
import sys
A : Tuple = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
A : Dict = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : List[Any] ) -> str:
"""simple docstring"""
return AutoConfig.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoModel.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase ( *__magic_name__ : Dict , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*__magic_name__ , **__magic_name__ )
| 15 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A : List[Any] = logging.get_logger(__name__)
_A : List[Any] = """▁"""
_A : Optional[int] = {"""vocab_file""": """spiece.model"""}
_A : Optional[int] = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
_A : Tuple = {
"""google/reformer-crime-and-punishment""": 52_42_88,
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : int = VOCAB_FILES_NAMES
lowerCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self , A_ , A_="</s>" , A_="<unk>" , A_=[] , A_ = None , **A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A_ , unk_token=A_ , additional_special_tokens=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def lowercase_ ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = None
return state
def __setstate__( self , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , A_ ):
'''simple docstring'''
return self.sp_model.encode(A_ , out_type=A_ )
def lowercase_ ( self , A_ ):
'''simple docstring'''
return self.sp_model.piece_to_id(A_ )
def lowercase_ ( self , A_ ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
SCREAMING_SNAKE_CASE__ = self.sp_model.IdToPiece(A_ )
return token
def lowercase_ ( self , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
SCREAMING_SNAKE_CASE__ = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def lowercase_ ( self , A_ , A_ = None ):
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 100 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict="last" , _UpperCAmelCase : Any=None , _UpperCAmelCase : int=None , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_lengths
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = vocab_size
lowercase__ = n_special
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = summary_type
lowercase__ = use_proj
lowercase__ = scope
def lowerCamelCase__ (self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_input_lengths:
lowercase__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((lowercase__) , ) = result_with_labels.to_tuple()
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((lowercase__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = FlaubertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , ) -> str:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = FlaubertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
A__ = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__ (self : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def lowerCamelCase__ (self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = FlaubertModelTester(self )
lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , emb_dim=37 )
def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_UpperCAmelCase )
@slow
def lowerCamelCase__ (self : int ) -> List[Any]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = FlaubertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def lowerCamelCase__ (self : Dict ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ = torch.jit.trace(
_UpperCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """traced_model.pt""" ) )
lowercase__ = torch.jit.load(os.path.join(_UpperCAmelCase , """traced_model.pt""" ) , map_location=_UpperCAmelCase )
loaded(inputs_dict["""input_ids"""].to(_UpperCAmelCase ) , inputs_dict["""attention_mask"""].to(_UpperCAmelCase ) )
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase__ (self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ = model(_UpperCAmelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__ = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 15 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = BertTokenizer
_UpperCAmelCase = BertTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = filter_non_english
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE_ : Tuple = 'unwanted, running'
return input_text, output_text
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCAmelCase__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE_ : int = tokenizer.tokenize(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# With lower casing
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer(do_lower_case=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.tokenize(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = BasicTokenizer()
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'a\n\'ll !!to?\'d of, can\'t.'
SCREAMING_SNAKE_CASE_ : Tuple = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
SCREAMING_SNAKE_CASE_ : List[str] = {}
for i, token in enumerate(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = i
SCREAMING_SNAKE_CASE_ : List[str] = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.tokenizer_class.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE_ : str = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_r.encode_plus(
lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase__ , 'do_lower_case' ) else False
SCREAMING_SNAKE_CASE_ : int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['的', '人', '有']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''.join(lowerCAmelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE_ : Optional[int] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase__ )
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 101 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase ( __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
lowercase__ = emb.weight.data
return lin_layer
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str="facebook/mbart-large-en-ro" , __magic_name__ : Any=False , __magic_name__ : int=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = torch.load(__magic_name__ , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(__magic_name__ )
lowercase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
lowercase__ = MBartConfig.from_pretrained(__magic_name__ , vocab_size=__magic_name__ )
if mbart_aa and finetuned:
lowercase__ = """relu"""
lowercase__ = state_dict["""decoder.embed_tokens.weight"""]
lowercase__ = MBartForConditionalGeneration(__magic_name__ )
model.model.load_state_dict(__magic_name__ )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
A : Any = parser.parse_args()
A : str = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 15 | 0 |
"""simple docstring"""
__magic_name__ : dict[tuple[int, int, int], int] = {}
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCamelCase : Any = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCamelCase : Dict = _calculate(days - 1 , SCREAMING_SNAKE_CASE , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCamelCase : int = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCamelCase : Dict = _calculate(days - 1 , SCREAMING_SNAKE_CASE , 0 )
UpperCamelCase : Optional[int] = state_late + state_absent + state_ontime
UpperCamelCase : Any = prizestrings
return prizestrings
def UpperCamelCase (SCREAMING_SNAKE_CASE = 30 ):
return _calculate(SCREAMING_SNAKE_CASE , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 102 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase ( __magic_name__ : Any ) -> int:
"""simple docstring"""
lowercase__ = model.config
lowercase__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase__ = MBartConfig(
is_decoder=__magic_name__ , is_encoder_decoder=__magic_name__ , add_cross_attention=__magic_name__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__magic_name__ , add_final_layer_norm=__magic_name__ , )
return encoder_config, decoder_config
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if "encoder.model" in name:
lowercase__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowercase__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowercase__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowercase__ = """encoder.""" + name
if "attn.proj" in name:
lowercase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowercase__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowercase__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowercase__ = """encoder.layernorm.bias"""
return name
def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : str ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
lowercase__ = key.split(""".""" )
lowercase__ = int(key_split[3] )
lowercase__ = int(key_split[5] )
lowercase__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[:dim]
lowercase__ = val[dim : dim * 2]
lowercase__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase__ = val
return orig_state_dict
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : List[Any]=None , __magic_name__ : Dict=False ) -> int:
"""simple docstring"""
lowercase__ = DonutModel.from_pretrained(__magic_name__ ).eval()
# load HuggingFace model
lowercase__ , lowercase__ = get_configs(__magic_name__ )
lowercase__ = DonutSwinModel(__magic_name__ )
lowercase__ = MBartForCausalLM(__magic_name__ )
lowercase__ = VisionEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
model.eval()
lowercase__ = original_model.state_dict()
lowercase__ = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# verify results on scanned document
lowercase__ = load_dataset("""hf-internal-testing/example-documents""" )
lowercase__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowercase__ = XLMRobertaTokenizerFast.from_pretrained(__magic_name__ , from_slow=__magic_name__ )
lowercase__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase__ = DonutProcessor(__magic_name__ , __magic_name__ )
lowercase__ = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowercase__ = """When is the coffee break?"""
lowercase__ = task_prompt.replace("""{user_input}""" , __magic_name__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowercase__ = original_model.decoder.tokenizer(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors="""pt""" )[
"""input_ids"""
]
lowercase__ = original_model.encoder.model.patch_embed(__magic_name__ )
lowercase__ , lowercase__ = model.encoder.embeddings(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
# verify encoder hidden states
lowercase__ = original_model.encoder(__magic_name__ )
lowercase__ = model.encoder(__magic_name__ ).last_hidden_state
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-2 )
# verify decoder hidden states
lowercase__ = original_model(__magic_name__ , __magic_name__ , __magic_name__ ).logits
lowercase__ = model(__magic_name__ , decoder_input_ids=__magic_name__ ).logits
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
A : Optional[int] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 15 | 0 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def snake_case ( lowerCAmelCase_ ) -> int:
_snake_case = prime_factors(lowerCAmelCase_ )
if is_square_free(lowerCAmelCase_ ):
return -1 if len(lowerCAmelCase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 15 | 0 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def snake_case__ ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
pass
def _lowerCamelCase ( UpperCAmelCase_ : Image ) -> str:
"""simple docstring"""
A__ = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
A__ : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
A__ = DepthEstimationPipeline(model=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
A__ = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , SCREAMING_SNAKE_CASE__ )
import datasets
A__ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A__ = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , SCREAMING_SNAKE_CASE__ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def snake_case__ ( self ) -> Dict:
pass
@slow
@require_torch
def snake_case__ ( self ) -> Optional[int]:
A__ = "Intel/dpt-large"
A__ = pipeline("depth-estimation" , model=SCREAMING_SNAKE_CASE__ )
A__ = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A__ = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_6_2 )
@require_torch
def snake_case__ ( self ) -> str:
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 104 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Any , _UpperCAmelCase : NestedDataStructureLike[PathLike] , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Optional[Features] = None , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ) -> List[str]:
"""simple docstring"""
super().__init__(
_UpperCAmelCase , split=_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase , data_files=_UpperCAmelCase , features=_UpperCAmelCase , **_UpperCAmelCase , )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , )
lowercase__ = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
| 15 | 0 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(lowerCamelCase_ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 105 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = """ylacombe/bark-small"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = """en_speaker_1"""
lowercase__ = """This is a test string"""
lowercase__ = """speaker_embeddings_path.json"""
lowercase__ = """speaker_embeddings"""
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase__ (self : str ) -> Tuple:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase__ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase__ = 35
lowercase__ = 2
lowercase__ = 8
lowercase__ = {
"""semantic_prompt""": np.ones(_UpperCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase__ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
lowercase__ = processor(text=self.input_string )
lowercase__ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 15 | 0 |
from math import sqrt
def lowerCamelCase_ ( lowerCAmelCase__ : int ) -> bool:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
A = True
# 0 and 1 are none primes.
if number <= 1:
A = False
for divisor in range(2 , int(round(sqrt(lowerCAmelCase__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
A = False
break
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'status' must been from type bool"
return status
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple ) -> str:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
A = list(range(2 , n + 1 ) )
A = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase__ ) ):
for j in range(i + 1 , len(lowerCAmelCase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
A = 0
# filters actual prime numbers.
A = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> Dict:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n > 2), "'N' must been an int and > 2"
A = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase__ ):
ans.append(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> Dict:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and number >= 0, "'number' must been an int and >= 0"
A = [] # this list will be returns of the function.
# potential prime number factors.
A = 2
A = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase__ ):
while quotient != 1:
if is_prime(lowerCAmelCase__ ) and (quotient % factor == 0):
ans.append(lowerCAmelCase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
A = 0
# prime factorization of 'number'
A = prime_factorization(lowerCAmelCase__ )
A = max(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type int"
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : int ) -> List[Any]:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
A = 0
# prime factorization of 'number'
A = prime_factorization(lowerCAmelCase__ )
A = min(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type int"
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCAmelCase__ ), "compare bust been from type bool"
return number % 2 == 0
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple ) -> Any:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCAmelCase__ ), "compare bust been from type bool"
return number % 2 != 0
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> int:
'''simple docstring'''
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (number > 2) and is_even(lowerCAmelCase__ )
), "'number' must been an int, even and > 2"
A = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
A = get_prime_numbers(lowerCAmelCase__ )
A = len(lowerCAmelCase__ )
# run variable for while-loops.
A = 0
A = None
# exit variable. for break up the loops
A = True
while i < len_pn and loop:
A = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
A = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (len(lowerCAmelCase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
A = 0
while numbera != 0:
A = numbera % numbera
A = numbera
A = rest
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
A = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
A = prime_factorization(lowerCAmelCase__ )
A = prime_factorization(lowerCAmelCase__ )
elif numbera == 1 or numbera == 1:
A = []
A = []
A = max(lowerCAmelCase__ , lowerCAmelCase__ )
A = 0
A = 0
A = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
A = prime_fac_a.count(lowerCAmelCase__ )
A = prime_fac_a.count(lowerCAmelCase__ )
for _ in range(max(lowerCAmelCase__ , lowerCAmelCase__ ) ):
ans *= n
else:
A = prime_fac_a.count(lowerCAmelCase__ )
for _ in range(lowerCAmelCase__ ):
ans *= n
done.append(lowerCAmelCase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
A = prime_fac_a.count(lowerCAmelCase__ )
for _ in range(lowerCAmelCase__ ):
ans *= n
done.append(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple ) -> Any:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 0), "'number' must been a positive int"
A = 0
A = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase__ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and is_prime(
lowerCAmelCase__ ), "'ans' must been a prime number and from type int"
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
assert (
is_prime(lowerCAmelCase__ ) and is_prime(lowerCAmelCase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
A = p_number_a + 1 # jump to the next number
A = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase__ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase__ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase__ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> List[str]:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 1), "'n' must been int and >= 1"
A = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase__ )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase__ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number > 1
), "'number' must been an int and >= 1"
A = get_divisors(lowerCAmelCase__ )
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
A = gcd(abs(lowerCAmelCase__ ) , abs(lowerCAmelCase__ ) )
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCamelCase_ ( lowerCAmelCase__ : Dict ) -> int:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 0), "'n' must been a int and >= 0"
A = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCamelCase_ ( lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 0), "'n' must been an int and >= 0"
A = 0
A = 1
A = 1 # this will be return
for _ in range(n - 1 ):
A = ans
ans += fiba
A = tmp
return ans | 106 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
A : Optional[Any] = logging.get_logger(__name__)
A : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Optional[int] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : int = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
A : Optional[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
A : Any = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
A : Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A : Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRContextEncoderTokenizer
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRQuestionEncoderTokenizer
A : Any = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A : Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A : int = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(UpperCAmelCase__ )
class A :
'''simple docstring'''
def __call__(self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[bool] = None , **_UpperCAmelCase : Any , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
elif titles is None or texts is None:
lowercase__ = titles if texts is None else texts
return super().__call__(
_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = titles if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [titles]
lowercase__ = texts if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [texts]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = questions if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [questions] * n_passages
assert len(_UpperCAmelCase ) == len(
_UpperCAmelCase ), f'''There should be as many titles than texts but got {len(_UpperCAmelCase )} titles and {len(_UpperCAmelCase )} texts.'''
lowercase__ = super().__call__(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = super().__call__(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_UpperCAmelCase , _UpperCAmelCase )
]
}
if return_attention_mask is not False:
lowercase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ = attention_mask
return self.pad(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : BatchEncoding , _UpperCAmelCase : DPRReaderOutput , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = reader_input["""input_ids"""]
lowercase__ , lowercase__ , lowercase__ = reader_output[:3]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = sorted(range(_UpperCAmelCase ) , reverse=_UpperCAmelCase , key=relevance_logits.__getitem__ )
lowercase__ = []
for doc_id in sorted_docs:
lowercase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ = sequence_ids.index(self.pad_token_id )
else:
lowercase__ = len(_UpperCAmelCase )
lowercase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCAmelCase , top_spans=_UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCAmelCase , start_index=_UpperCAmelCase , end_index=_UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : int , _UpperCAmelCase : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = []
for start_index, start_score in enumerate(_UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase__ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
lowercase__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowercase__ = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class A ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = READER_PRETRAINED_VOCAB_FILES_MAP
A__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = READER_PRETRAINED_INIT_CONFIGURATION
A__ = ['''input_ids''', '''attention_mask''']
A__ = DPRReaderTokenizer
| 15 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
_UpperCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase_ :
"""simple docstring"""
__lowerCAmelCase = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class lowercase_ :
"""simple docstring"""
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Train language if it is different from the evaluation language."} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__lowerCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_A , _A , _A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , __snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A = training_args.get_process_log_level()
logger.setLevel(__snake_case )
datasets.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_A = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
_A = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_A = train_dataset.features['label'].names
if training_args.do_eval:
_A = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_A = eval_dataset.features['label'].names
if training_args.do_predict:
_A = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_A = predict_dataset.features['label'].names
# Labels
_A = len(__snake_case )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , idalabel={str(__snake_case ): label for i, label in enumerate(__snake_case )} , labelaid={label: i for i, label in enumerate(__snake_case )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_A = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_A = False
def preprocess_function(__snake_case : List[Any] ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=__snake_case , max_length=data_args.max_seq_length , truncation=__snake_case , )
if training_args.do_train:
if data_args.max_train_samples is not None:
_A = min(len(__snake_case ) , data_args.max_train_samples )
_A = train_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_A = train_dataset.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_A = min(len(__snake_case ) , data_args.max_eval_samples )
_A = eval_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_A = eval_dataset.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_A = min(len(__snake_case ) , data_args.max_predict_samples )
_A = predict_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
_A = predict_dataset.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
_A = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__snake_case : EvalPrediction ):
_A = p.predictions[0] if isinstance(p.predictions , __snake_case ) else p.predictions
_A = np.argmax(__snake_case , axis=1 )
return metric.compute(predictions=__snake_case , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_A = default_data_collator
elif training_args.fpaa:
_A = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 )
else:
_A = None
# Initialize our Trainer
_A = Trainer(
model=__snake_case , args=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__snake_case , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
_A = None
if training_args.resume_from_checkpoint is not None:
_A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A = last_checkpoint
_A = trainer.train(resume_from_checkpoint=__snake_case )
_A = train_result.metrics
_A = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_A = min(__snake_case , len(__snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __snake_case )
trainer.save_metrics('train' , __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_A = trainer.evaluate(eval_dataset=__snake_case )
_A = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
_A = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('eval' , __snake_case )
trainer.save_metrics('eval' , __snake_case )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
_A , _A , _A = trainer.predict(__snake_case , metric_key_prefix='predict' )
_A = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__snake_case )
)
_A = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('predict' , __snake_case )
trainer.save_metrics('predict' , __snake_case )
_A = np.argmax(__snake_case , axis=1 )
_A = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(__snake_case , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(__snake_case ):
_A = label_list[item]
writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
| 107 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[int] ) -> list[int]: # This function is recursive
"""simple docstring"""
lowercase__ = len(__magic_name__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ = array[0]
lowercase__ = False
lowercase__ = 1
lowercase__ = []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ = True
lowercase__ = [element for element in array[i:] if element >= array[i]]
lowercase__ = longest_subsequence(__magic_name__ )
if len(__magic_name__ ) > len(__magic_name__ ):
lowercase__ = temp_array
else:
i += 1
lowercase__ = [element for element in array[1:] if element >= pivot]
lowercase__ = [pivot, *longest_subsequence(__magic_name__ )]
if len(__magic_name__ ) > len(__magic_name__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
) | 108 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A : List[Any] = None
A : Optional[Any] = logging.get_logger(__name__)
A : str = '▁'
A : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
A : Optional[int] = {
'google/pegasus-xsum': 5_1_2,
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = PegasusTokenizer
A__ = ['''input_ids''', '''attention_mask''']
def __init__(self : Tuple , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Union[str, Any]="<pad>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : Optional[Any]="<mask_2>" , _UpperCAmelCase : Optional[Any]="<mask_1>" , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Dict=103 , **_UpperCAmelCase : Dict , ) -> Dict:
"""simple docstring"""
lowercase__ = offset
if additional_special_tokens is not None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_UpperCAmelCase )}, but is'''
f''' {type(_UpperCAmelCase )}''' )
lowercase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_UpperCAmelCase ) , self.offset - 1 )
]
if len(set(_UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , pad_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , mask_token_sent=_UpperCAmelCase , offset=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List , _UpperCAmelCase : Optional[List] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(_UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase__ (self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ (self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
| 15 | 0 |
'''simple docstring'''
from __future__ import annotations
class __a :
def __init__( self : Optional[int] ,lowerCamelCase : list[list[int]] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(lowerCamelCase ) != 0:
__SCREAMING_SNAKE_CASE = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowerCamelCase ) != cols:
raise error
for value in row:
if not isinstance(lowerCamelCase ,(int, float) ):
raise error
__SCREAMING_SNAKE_CASE = rows
else:
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return len(self.rows )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.rows[0] )
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return (self.num_rows, self.num_columns)
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return self.order[0] == self.order[1]
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return bool(self.determinant() )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : int ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCamelCase ).determinant()
def UpperCAmelCase__ ( self : str ,lowerCamelCase : int ,lowerCamelCase : int ):
'''simple docstring'''
if (row + column) % 2 == 0:
return self.get_minor(lowerCamelCase ,lowerCamelCase )
return -1 * self.get_minor(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return Matrix(
[
[self.get_minor(lowerCamelCase ,lowerCamelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self : List[Any] ):
'''simple docstring'''
return str(self.rows )
def __str__( self : List[str] ):
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(lowerCamelCase ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : list[int] ,lowerCamelCase : int | None = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise type_error
for value in row:
if not isinstance(lowerCamelCase ,(int, float) ):
raise type_error
if len(lowerCamelCase ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = self.rows[0:position] + [row] + self.rows[position:]
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : list[int] ,lowerCamelCase : int | None = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise type_error
for value in column:
if not isinstance(lowerCamelCase ,(int, float) ):
raise type_error
if len(lowerCamelCase ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
__SCREAMING_SNAKE_CASE = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
__SCREAMING_SNAKE_CASE = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : int ,lowerCamelCase : object ):
'''simple docstring'''
if not isinstance(lowerCamelCase ,lowerCamelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any ,lowerCamelCase : object ):
'''simple docstring'''
return not self == other
def __neg__( self : Any ):
'''simple docstring'''
return self * -1
def __add__( self : List[Any] ,lowerCamelCase : Matrix ):
'''simple docstring'''
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Any ,lowerCamelCase : Matrix ):
'''simple docstring'''
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Any ,lowerCamelCase : Matrix | int | float ):
'''simple docstring'''
if isinstance(lowerCamelCase ,(int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCamelCase ,lowerCamelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(lowerCamelCase ,lowerCamelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self : Optional[int] ,lowerCamelCase : int ):
'''simple docstring'''
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
__SCREAMING_SNAKE_CASE = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def UpperCAmelCase__ ( cls : str ,lowerCamelCase : list[int] ,lowerCamelCase : list[int] ):
'''simple docstring'''
return sum(row[i] * column[i] for i in range(len(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A : List[str] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
A : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int = CHRF.CHAR_ORDER , _UpperCAmelCase : int = CHRF.WORD_ORDER , _UpperCAmelCase : int = CHRF.BETA , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , ) -> int:
"""simple docstring"""
lowercase__ = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowercase__ = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
lowercase__ = CHRF(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__ = sb_chrf.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 15 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase , unittest.TestCase ):
UpperCamelCase : Tuple = CLIPTokenizer
UpperCamelCase : List[str] = CLIPTokenizerFast
UpperCamelCase : Tuple = True
UpperCamelCase : str = {}
UpperCamelCase : Tuple = False
def __snake_case ( self ):
super().setUp()
# fmt: off
UpperCAmelCase__ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCAmelCase__ : List[str] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCAmelCase__ : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
UpperCAmelCase__ : Optional[Any] = {'unk_token': '<unk>'}
UpperCAmelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase_ ) )
def __snake_case ( self , **UpperCamelCase_ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __snake_case ( self , **UpperCamelCase_ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Any = 'lower newer'
UpperCAmelCase__ : Dict = 'lower newer'
return input_text, output_text
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase__ : Optional[int] = 'lower newer'
UpperCAmelCase__ : int = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
UpperCAmelCase__ : Dict = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase__ : int = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
@require_ftfy
def __snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCAmelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCAmelCase__ : int = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
UpperCAmelCase__ : List[str] = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCAmelCase__ : Tuple = 'xa\u0303y' + ' ' + 'x\xe3y'
UpperCAmelCase__ : Dict = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCAmelCase__ : List[str] = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on unicode of space type
UpperCAmelCase__ : Tuple = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCAmelCase__ : Dict = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on unicode of line break type
UpperCAmelCase__ : Optional[Any] = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCAmelCase__ : List[Any] = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCAmelCase__ : str = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def __snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase__ : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase__ : int = F'''{text_of_1_token} {text_of_1_token}'''
UpperCAmelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , )
UpperCAmelCase__ : List[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCAmelCase__ : Dict = F''' {text}'''
UpperCAmelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , )
UpperCAmelCase__ : Tuple = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
def __snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(UpperCamelCase_ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def __snake_case ( self ):
super().test_tokenization_python_rust_equals()
def __snake_case ( self ):
# CLIP always lower cases letters
pass
| 110 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def lowerCamelCase__ (self : Tuple , **_UpperCAmelCase : int ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = """</s>"""
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_UpperCAmelCase ) , 1103 )
def lowerCamelCase__ (self : str ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowercase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowercase__ = """To ensure a smooth flow of bank resolutions."""
lowercase__ = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase__ (self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase , offset=0 , mask_token_sent=_UpperCAmelCase , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Any ) -> str:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Union[str, Any] ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_torch
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowercase__ = self._large_tokenizer(_UpperCAmelCase ).input_ids
self.assertListEqual(
_UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 15 | 0 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
UpperCAmelCase_ : int = 'Hello world! cécé herlolip'
UpperCAmelCase_ : Optional[int] = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : List[str] = BertAbsConfig(
temp_dir='.' , finetune_bert=A , large=A , share_emb=A , use_bert_emb=A , encoder='bert' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
_a : str = torch.load(A , lambda A , A : storage )
_a : int = AbsSummarizer(A , torch.device('cpu' ) , A )
original.eval()
_a : str = BertAbsSummarizer(A , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
_a : str = BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
_a : List[str] = tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(A )) )
_a : Optional[int] = torch.tensor(A ).unsqueeze(0 )
_a : int = tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(A )) )
_a : Optional[Any] = torch.tensor(A ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_a : Optional[int] = encoder_input_ids
_a : Dict = decoder_input_ids
_a : List[str] = None
_a : str = None
_a : Dict = None
_a : str = None
_a : Tuple = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_a : int = original(A , A , A , A , A , A , A )[0]
_a : Any = original.generator(A )
_a : Union[str, Any] = new_model(
A , A , A , A , A )[0]
_a : int = new_model.generator(A )
_a : Union[str, Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(A ) )
_a : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(A ) )
_a : str = torch.allclose(A , A , atol=1E-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 120 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase__ = load_file(__magic_name__ )
lowercase__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.text_encoder
else:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.unet
# find the target layer
lowercase__ = layer_infos.pop(0 )
while len(__magic_name__ ) > -1:
try:
lowercase__ = curr_layer.__getattr__(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase__ = layer_infos.pop(0 )
elif len(__magic_name__ ) == 0:
break
except Exception:
if len(__magic_name__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase__ = layer_infos.pop(0 )
lowercase__ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(__magic_name__ )
else:
pair_keys.append(__magic_name__ )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase__ = state_dict[pair_keys[0]].to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ )
# update visited list
for item in pair_keys:
visited.append(__magic_name__ )
return pipeline
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
A : str = parser.parse_args()
A : Tuple = args.base_model_path
A : List[str] = args.checkpoint_path
A : Optional[int] = args.dump_path
A : Optional[int] = args.lora_prefix_unet
A : Any = args.lora_prefix_text_encoder
A : Any = args.alpha
A : List[str] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
A : int = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 15 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCAmelCase__ ,unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetImgaImgPipeline
UpperCAmelCase__ = ["image_embeds", "negative_image_embeds", "image", "hint"]
UpperCAmelCase__ = ["image_embeds", "negative_image_embeds", "image", "hint"]
UpperCAmelCase__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ = False
@property
def lowerCamelCase__ ( self : Tuple ) -> int:
return 3_2
@property
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
return 3_2
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
return self.time_input_dim
@property
def lowerCamelCase__ ( self : Any ) -> List[Any]:
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self : List[str] ) -> Any:
return 1_0_0
@property
def lowerCamelCase__ ( self : Optional[int] ) -> str:
torch.manual_seed(0 )
__magic_name__: Any = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__: Optional[Any] = UNetaDConditionModel(**_UpperCAmelCase )
return model
@property
def lowerCamelCase__ ( self : Dict ) -> List[str]:
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self : Any ) -> Dict:
torch.manual_seed(0 )
__magic_name__: List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self : Tuple ) -> str:
__magic_name__: Union[str, Any] = self.dummy_unet
__magic_name__: Any = self.dummy_movq
__magic_name__: List[Any] = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__magic_name__: int = DDIMScheduler(**_UpperCAmelCase )
__magic_name__: Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCamelCase__ ( self : str , __snake_case : List[str] , __snake_case : int=0 ) -> Dict:
__magic_name__: List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__magic_name__: Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_UpperCAmelCase )
# create init_image
__magic_name__: List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__magic_name__: Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__: Dict = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
# create hint
__magic_name__: str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
__magic_name__: Optional[Any] = torch.manual_seed(_UpperCAmelCase )
else:
__magic_name__: Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__magic_name__: List[str] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self : int ) -> List[str]:
__magic_name__: int = """cpu"""
__magic_name__: Optional[int] = self.get_dummy_components()
__magic_name__: Any = self.pipeline_class(**_UpperCAmelCase )
__magic_name__: int = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__magic_name__: int = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__magic_name__: Optional[int] = output.images
__magic_name__: str = pipe(
**self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0]
__magic_name__: Optional[int] = image[0, -3:, -3:, -1]
__magic_name__: List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__magic_name__: Any = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
__magic_name__: Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__magic_name__: Dict = init_image.resize((5_1_2, 5_1_2) )
__magic_name__: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
__magic_name__: Any = torch.from_numpy(np.array(_UpperCAmelCase ) ).float() / 255.0
__magic_name__: List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__magic_name__: List[str] = """A robot, 4k photo"""
__magic_name__: Union[str, Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_UpperCAmelCase )
__magic_name__: Any = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
__magic_name__: Optional[Any] = pipeline.to(_UpperCAmelCase )
pipeline.set_progress_bar_config(disable=_UpperCAmelCase )
__magic_name__: Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__, __magic_name__: Optional[int] = pipe_prior(
_UpperCAmelCase , image=_UpperCAmelCase , strength=0.85 , generator=_UpperCAmelCase , negative_prompt="""""" , ).to_tuple()
__magic_name__: Tuple = pipeline(
image=_UpperCAmelCase , image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , hint=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="""np""" , )
__magic_name__: Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 96 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Tuple = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''ibert'''
def __init__(self : int , _UpperCAmelCase : Union[str, Any]=3_0522 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[Any]=3072 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=1E-1_2 , _UpperCAmelCase : int=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : int="absolute" , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]="none" , **_UpperCAmelCase : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = quant_mode
lowercase__ = force_dequant
class A ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def lowerCamelCase__ (self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 15 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
A : Union[str, Any] = 0B101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
A : Any = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = WATERMARK_BITS
SCREAMING_SNAKE_CASE_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark )
def __A ( self : Any , __magic_name__ : torch.FloatTensor ) -> Optional[Any]:
if images.shape[-1] < 256:
return images
SCREAMING_SNAKE_CASE_ = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE_ = [self.encoder.encode(_UpperCAmelCase , "dwtDct" ) for image in images]
SCREAMING_SNAKE_CASE_ = torch.from_numpy(np.array(_UpperCAmelCase ) ).permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE_ = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
| 140 |
from math import log
from scipy.constants import Boltzmann, physical_constants
A : Any = 3_0_0 # TEMPERATURE (unit = K)
def UpperCamelCase ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_UpperCamelCase : Union[str, Any] = pytest.mark.integration
@require_faiss
class _lowercase( UpperCAmelCase__ ):
"""simple docstring"""
def snake_case ( self: int ):
__UpperCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_UpperCAmelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def snake_case ( self: Union[str, Any] ):
import faiss
__UpperCAmelCase = self._create_dummy_dataset()
__UpperCAmelCase = dset.map(
lambda a ,a : {"vecs": i * np.ones(5 ,dtype=np.floataa )} ,with_indices=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase )
__UpperCAmelCase = dset.add_faiss_index('vecs' ,batch_size=100 ,metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
dset.drop_index('vecs' )
def snake_case ( self: Tuple ):
import faiss
__UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,batch_size=100 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
__UpperCAmelCase , __UpperCAmelCase = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
def snake_case ( self: Union[str, Any] ):
import faiss
__UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('vecs' ,tmp_file.name )
dset.load_faiss_index('vecs2' ,tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase = dset.get_nearest_examples('vecs2' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
def snake_case ( self: Dict ):
__UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_UpperCAmelCase ,partial(dset.get_nearest_examples ,'vecs2' ,np.ones(5 ,dtype=np.floataa ) ) )
def snake_case ( self: int ):
from elasticsearch import Elasticsearch
__UpperCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__UpperCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
__UpperCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' ,es_client=_UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase = dset.get_nearest_examples('filename' ,'my_name-train_29' )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
@require_faiss
class _lowercase( UpperCAmelCase__ ):
"""simple docstring"""
def snake_case ( self: Any ):
import faiss
__UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal ,5 )
index.add_vectors(np.zeros((5, 5) ,dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal ,10 )
# single query
__UpperCAmelCase = np.zeros(5 ,dtype=np.floataa )
__UpperCAmelCase = 1
__UpperCAmelCase , __UpperCAmelCase = index.search(_UpperCAmelCase )
self.assertRaises(_UpperCAmelCase ,index.search ,query.reshape(-1 ,1 ) )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
# batched queries
__UpperCAmelCase = np.eye(5 ,dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase = index.search_batch(_UpperCAmelCase )
self.assertRaises(_UpperCAmelCase ,index.search_batch ,queries[0] )
__UpperCAmelCase = [scores[0] for scores in total_scores]
__UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCAmelCase ) ,0 )
self.assertListEqual([4, 3, 2, 1, 0] ,_UpperCAmelCase )
def snake_case ( self: List[str] ):
import faiss
__UpperCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
__UpperCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexLSH )
with self.assertRaises(_UpperCAmelCase ):
__UpperCAmelCase = FaissIndex(string_factory='Flat' ,custom_index=faiss.IndexFlat(5 ) )
def snake_case ( self: Optional[Any] ):
import faiss
__UpperCAmelCase = faiss.IndexFlat(5 )
__UpperCAmelCase = FaissIndex(custom_index=_UpperCAmelCase )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
def snake_case ( self: str ):
import faiss
__UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase = np.zeros(5 ,dtype=np.floataa )
__UpperCAmelCase = 1
__UpperCAmelCase , __UpperCAmelCase = index.search(_UpperCAmelCase )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
@require_faiss
def __snake_case ( lowerCAmelCase : Tuple ):
import faiss
__UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__UpperCAmelCase = 'index.faiss'
__UpperCAmelCase = F"""mock://{index_name}"""
index.save(lowerCAmelCase , storage_options=mockfs.storage_options )
__UpperCAmelCase = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options )
__UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase = 1
__UpperCAmelCase , __UpperCAmelCase = index.search(lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _lowercase( UpperCAmelCase__ ):
"""simple docstring"""
def snake_case ( self: Optional[int] ):
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__UpperCAmelCase = Elasticsearch()
__UpperCAmelCase = {'acknowledged': True}
__UpperCAmelCase = ElasticSearchIndex(es_client=_UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__UpperCAmelCase = 'foo'
__UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__UpperCAmelCase , __UpperCAmelCase = index.search(_UpperCAmelCase )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# single query with timeout
__UpperCAmelCase = 'foo'
__UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__UpperCAmelCase , __UpperCAmelCase = index.search(_UpperCAmelCase ,request_timeout=30 )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# batched queries
__UpperCAmelCase = ['foo', 'bar', 'foobar']
__UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__UpperCAmelCase , __UpperCAmelCase = index.search_batch(_UpperCAmelCase )
__UpperCAmelCase = [scores[0] for scores in total_scores]
__UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCAmelCase ) ,0 )
self.assertListEqual([1, 1, 1] ,_UpperCAmelCase )
# batched queries with timeout
__UpperCAmelCase = ['foo', 'bar', 'foobar']
__UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__UpperCAmelCase , __UpperCAmelCase = index.search_batch(_UpperCAmelCase ,request_timeout=30 )
__UpperCAmelCase = [scores[0] for scores in total_scores]
__UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCAmelCase ) ,0 )
self.assertListEqual([1, 1, 1] ,_UpperCAmelCase )
| 396 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A :
'''simple docstring'''
A__ = 42
A__ = None
A__ = None
def UpperCamelCase ( ) -> Node | None:
"""simple docstring"""
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(__magic_name__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__magic_name__ , __magic_name__ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(__magic_name__ , __magic_name__ ) )
lowercase__ = 0
return output
def UpperCamelCase ( ) -> None: # Main function for testing.
"""simple docstring"""
lowercase__ = make_tree()
print(f'''In-order Traversal: {inorder(__magic_name__ )}''' )
print(f'''Pre-order Traversal: {preorder(__magic_name__ )}''' )
print(f'''Post-order Traversal: {postorder(__magic_name__ )}''' , """\n""" )
print(f'''Height of Tree: {height(__magic_name__ )}''' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(__magic_name__ ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(__magic_name__ ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(__magic_name__ , level=__magic_name__ ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 15 | 0 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__A = logging.getLogger(__name__)
class _snake_case ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Any=None ):
super().__init__(
_UpperCAmelCase , question_encoder_tokenizer=_UpperCAmelCase , generator_tokenizer=_UpperCAmelCase , index=_UpperCAmelCase , init_retrieval=_UpperCAmelCase , )
__lowerCamelCase : Union[str, Any] = None
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : int ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
__lowerCamelCase : int = self._infer_socket_ifname()
# avoid clash with the NCCL port
__lowerCamelCase : Any = str(distributed_port + 1 )
__lowerCamelCase : str = dist.new_group(ranks=_UpperCAmelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowerCamelCase__ ( self : Optional[int] ):
return dist.get_rank(group=self.process_group ) == 0
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple=torch.floataa ):
__lowerCamelCase : Dict = torch.empty(_UpperCAmelCase , dtype=_UpperCAmelCase )
dist.scatter(_UpperCAmelCase , src=0 , scatter_list=_UpperCAmelCase , group=self.process_group )
return target_tensor
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : Dict = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__lowerCamelCase : List[str] = next((addr for addr in addrs if addr.startswith("e" )) , _UpperCAmelCase )
return ifname
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : int ):
if not dist.is_initialized():
__lowerCamelCase , __lowerCamelCase : Tuple = self._main_retrieve(_UpperCAmelCase , _UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_UpperCAmelCase )
# distributed training
__lowerCamelCase : Any = dist.get_world_size(group=self.process_group )
# gather logic
__lowerCamelCase : Union[str, Any] = None
if self._is_main():
__lowerCamelCase : Any = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_UpperCAmelCase )]
dist.gather(torch.tensor(_UpperCAmelCase ) , dst=0 , gather_list=_UpperCAmelCase , group=self.process_group )
# scatter logic
__lowerCamelCase : Tuple = question_hidden_states.shape[0]
__lowerCamelCase : str = []
__lowerCamelCase : int = []
if self._is_main():
assert len(_UpperCAmelCase ) == world_size
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self._main_retrieve(torch.cat(_UpperCAmelCase ).numpy() , _UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : Any = torch.tensor(_UpperCAmelCase ), torch.tensor(_UpperCAmelCase )
__lowerCamelCase : Any = self._chunk_tensor(_UpperCAmelCase , _UpperCAmelCase )
__lowerCamelCase : Tuple = self._chunk_tensor(_UpperCAmelCase , _UpperCAmelCase )
__lowerCamelCase : List[Any] = self._scattered(_UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
__lowerCamelCase : Dict = self._scattered(_UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_UpperCAmelCase ) | 646 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Tuple = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''poolformer'''
def __init__(self : Dict , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : str=16 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Union[str, Any]=4.0 , _UpperCAmelCase : str=[2, 2, 6, 2] , _UpperCAmelCase : int=[64, 128, 320, 512] , _UpperCAmelCase : Union[str, Any]=[7, 3, 3, 3] , _UpperCAmelCase : List[Any]=[4, 2, 2, 2] , _UpperCAmelCase : Union[str, Any]=[2, 1, 1, 1] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=1E-5 , _UpperCAmelCase : Tuple=0.02 , **_UpperCAmelCase : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = stride
lowercase__ = padding
lowercase__ = pool_size
lowercase__ = hidden_sizes
lowercase__ = mlp_ratio
lowercase__ = depths
lowercase__ = patch_sizes
lowercase__ = strides
lowercase__ = num_encoder_blocks
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_layer_scale
lowercase__ = layer_scale_init_value
lowercase__ = initializer_range
super().__init__(**_UpperCAmelCase )
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = version.parse('''1.11''' )
@property
def lowerCamelCase__ (self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ (self : Dict ) -> float:
"""simple docstring"""
return 2E-3
| 15 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
__A : Any = 300 # TEMPERATURE (unit = K)
def UpperCAmelCase ( lowerCamelCase_ :float , lowerCamelCase_ :float , lowerCamelCase_ :float , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 334 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class A ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
@require_torch
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
lowercase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowercase__ = bertabert.config.encoder.vocab_size
lowercase__ = tokenizer.sep_token_id
lowercase__ = tokenizer.cls_token_id
lowercase__ = 128
lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
lowercase__ = train_dataset.select(range(32 ) )
lowercase__ = val_dataset.select(range(16 ) )
lowercase__ = 4
def _map_to_encoder_decoder_inputs(_UpperCAmelCase : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_UpperCAmelCase , max_length=512 )
lowercase__ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_UpperCAmelCase , max_length=128 )
lowercase__ = inputs.input_ids
lowercase__ = inputs.attention_mask
lowercase__ = outputs.input_ids
lowercase__ = outputs.input_ids.copy()
lowercase__ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
lowercase__ = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_UpperCAmelCase : List[Any] ):
lowercase__ = pred.label_ids
lowercase__ = pred.predictions
# all unnecessary tokens are removed
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowercase__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
lowercase__ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
lowercase__ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy="""steps""" , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
| 15 | 0 |
'''simple docstring'''
from __future__ import annotations
A = []
def UpperCAmelCase ( UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : int , UpperCAmelCase__ : int):
for i in range(len(UpperCAmelCase__)):
if board[row][i] == 1:
return False
for i in range(len(UpperCAmelCase__)):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCAmelCase__ , -1 , -1) , range(UpperCAmelCase__ , -1 , -1)):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCAmelCase__ , -1 , -1) , range(UpperCAmelCase__ , len(UpperCAmelCase__))):
if board[i][j] == 1:
return False
return True
def UpperCAmelCase ( UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : int):
if row >= len(UpperCAmelCase__):
solution.append(UpperCAmelCase__)
printboard(UpperCAmelCase__)
print()
return True
for i in range(len(UpperCAmelCase__)):
if is_safe(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__):
lowerCamelCase : Optional[Any] = 1
solve(UpperCAmelCase__ , row + 1)
lowerCamelCase : Any = 0
return False
def UpperCAmelCase ( UpperCAmelCase__ : list[list[int]]):
for i in range(len(UpperCAmelCase__)):
for j in range(len(UpperCAmelCase__)):
if board[i][j] == 1:
print('Q' , end=' ')
else:
print('.' , end=' ')
print()
# n=int(input("The no. of queens"))
A = 8
A = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 320 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : Union[str, Any] = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ['ConvNextFeatureExtractor']
A : Optional[Any] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
A : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 15 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowercase_ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowercase_ = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def a ( ) -> Any:
"""simple docstring"""
_lowercase =(
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_lowercase =bs[:]
_lowercase =0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
_lowercase =[chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def a ( A__ : Any ) -> List[str]:
"""simple docstring"""
_lowercase =set()
_lowercase =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowercase =char
return pairs
class __lowerCAmelCase ( UpperCAmelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="replace" , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<mask>" , lowerCAmelCase=False , **lowerCAmelCase , ) -> List[Any]:
'''simple docstring'''
_lowercase =AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
_lowercase =AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
_lowercase =AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
_lowercase =AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
_lowercase =AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
_lowercase =AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowercase =AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
_lowercase =json.load(_UpperCAmelCase )
_lowercase ={v: k for k, v in self.encoder.items()}
_lowercase =errors # how to handle errors in decoding
_lowercase =bytes_to_unicode()
_lowercase ={v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding='utf-8' ) as merges_handle:
_lowercase =merges_handle.read().split('\n' )[1:-1]
_lowercase =[tuple(merge.split() ) for merge in bpe_merges]
_lowercase =dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowercase ={}
_lowercase =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowercase =re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def A__ ( self ) -> Tuple:
'''simple docstring'''
return len(self.encoder )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowercase =tuple(_UpperCAmelCase )
_lowercase =get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
_lowercase =min(_UpperCAmelCase , key=lambda lowerCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowercase , _lowercase =bigram
_lowercase =[]
_lowercase =0
while i < len(_UpperCAmelCase ):
try:
_lowercase =word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowercase =j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowercase =tuple(_UpperCAmelCase )
_lowercase =new_word
if len(_UpperCAmelCase ) == 1:
break
else:
_lowercase =get_pairs(_UpperCAmelCase )
_lowercase =' '.join(_UpperCAmelCase )
_lowercase =word
return word
def A__ ( self , lowerCAmelCase ) -> int:
'''simple docstring'''
_lowercase =[]
for token in re.findall(self.pat , _UpperCAmelCase ):
_lowercase =''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(' ' ) )
return bpe_tokens
def A__ ( self , lowerCAmelCase ) -> int:
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self , lowerCAmelCase ) -> int:
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase )
def A__ ( self , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowercase =''.join(_UpperCAmelCase )
_lowercase =bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowercase =os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowercase =os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '\n' )
_lowercase =0
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
_lowercase =token_index
writer.write(' '.join(_UpperCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> List[int]:
'''simple docstring'''
_lowercase =[self.sep_token_id]
_lowercase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self , lowerCAmelCase , lowerCAmelCase=False , **lowerCAmelCase ) -> Any:
'''simple docstring'''
_lowercase =kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
_lowercase =' ' + text
return (text, kwargs)
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Any:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def A__ ( self , lowerCAmelCase ) -> List[int]:
'''simple docstring'''
_lowercase =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(_UpperCAmelCase )
_lowercase =' '.join(_UpperCAmelCase )
_lowercase =self.encode(_UpperCAmelCase )
if len(_UpperCAmelCase ) > self.model_max_length:
_lowercase =input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 291 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : List[str]=7 ) -> Dict:
"""simple docstring"""
lowercase__ = None
if token is not None:
lowercase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
lowercase__ = """636036"""
lowercase__ = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
lowercase__ = requests.get(__magic_name__ , headers=__magic_name__ ).json()
return result["workflow_runs"]
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
lowercase__ = get_daily_ci_runs(__magic_name__ )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run["""id"""]
break
return workflow_run_id
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
lowercase__ = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=__magic_name__ , token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ , artifact_url=__magic_name__ , output_dir=__magic_name__ , token=__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(__magic_name__ , f'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
lowercase__ = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
lowercase__ = f.read().decode("""UTF-8""" )
return results
| 15 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE (UpperCAmelCase__ ):
"""simple docstring"""
_a : List[Any] = ['''image_processor''', '''tokenizer''']
_a : Optional[Any] = '''AutoImageProcessor'''
_a : Dict = '''AutoTokenizer'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
a_ = self.image_processor
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
a_ = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
a_ = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
a_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def _a ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def _a ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def _a ( self ):
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"]
| 536 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A ( unittest.TestCase ):
'''simple docstring'''
A__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 )
lowercase__ = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
] , )
@require_torch
def lowerCamelCase__ (self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
lowercase__ = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
lowercase__ = pipeline(
"""video-classification""" , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase__ = video_classifier(_UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def lowerCamelCase__ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
| 15 | 0 |
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase = "" , lowercase = False ):
"""simple docstring"""
A_ : Dict = {}
# A node will be a leaf if the tree contains its word
A_ : Tuple = is_leaf
A_ : Optional[Any] = prefix
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = 0
for q, w in zip(self.prefix , _UpperCAmelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for word in words:
self.insert(_UpperCAmelCase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.prefix == word:
A_ : Dict = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
A_ : List[Any] = RadixNode(prefix=_UpperCAmelCase , is_leaf=_UpperCAmelCase )
else:
A_ : Optional[Any] = self.nodes[word[0]]
A_ , A_ , A_ : Union[str, Any] = incoming_node.match(
_UpperCAmelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(_UpperCAmelCase )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
A_ : str = remaining_prefix
A_ : Tuple = self.nodes[matching_string[0]]
A_ : Dict = RadixNode(_UpperCAmelCase , _UpperCAmelCase )
A_ : List[str] = aux_node
if remaining_word == "":
A_ : List[Any] = True
else:
self.nodes[matching_string[0]].insert(_UpperCAmelCase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.nodes.get(word[0] , _UpperCAmelCase )
if not incoming_node:
return False
else:
A_ , A_ , A_ : List[str] = incoming_node.match(
_UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(_UpperCAmelCase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Any = self.nodes.get(word[0] , _UpperCAmelCase )
if not incoming_node:
return False
else:
A_ , A_ , A_ : Optional[int] = incoming_node.match(
_UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(_UpperCAmelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
A_ : List[Any] = list(self.nodes.values() )[0]
A_ : Any = merging_node.is_leaf
self.prefix += merging_node.prefix
A_ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
A_ : Any = False
# If there is 1 edge, we merge it with its child
else:
A_ : int = list(incoming_node.nodes.values() )[0]
A_ : int = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
A_ : List[str] = merging_node.nodes
return True
def lowerCAmelCase_ ( self , lowercase = 0 ):
"""simple docstring"""
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Dict = 'banana bananas bandana band apple all beast'.split()
A_ : List[Any] = RadixNode()
root.insert_many(__lowercase )
assert all(root.find(__lowercase ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def UpperCamelCase ( ):
'''simple docstring'''
assert test_trie()
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Any = RadixNode()
A_ : Optional[Any] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(__lowercase )
print('Words:' ,__lowercase )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
| 558 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Optional[Any] = 1_6
A : str = 3_2
def UpperCamelCase ( __magic_name__ : Accelerator , __magic_name__ : int = 16 ) -> List[str]:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__magic_name__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__magic_name__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
__magic_name__ , padding="""longest""" , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
lowercase__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : Union[str, Any] = mocked_dataloaders # noqa: F811
def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __magic_name__ ) == "1":
lowercase__ = 2
# New Code #
lowercase__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__magic_name__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["""lr"""]
lowercase__ = int(config["""num_epochs"""] )
lowercase__ = int(config["""seed"""] )
lowercase__ = int(config["""batch_size"""] )
lowercase__ = evaluate.load("""glue""" , """mrpc""" )
set_seed(__magic_name__ )
lowercase__ , lowercase__ = get_dataloaders(__magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=__magic_name__ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__magic_name__ ):
lowercase__ = model(**__magic_name__ )
lowercase__ = output.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**__magic_name__ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __magic_name__ )
def UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__magic_name__ , default=__magic_name__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__magic_name__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase__ = parser.parse_args()
lowercase__ = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 15 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _UpperCamelCase ( A , A , A , A , A , A = None , ):
UpperCamelCase_ ={}
if train_file is not None:
UpperCamelCase_ =[train_file]
if eval_file is not None:
UpperCamelCase_ =[eval_file]
if test_file is not None:
UpperCamelCase_ =[test_file]
UpperCamelCase_ =datasets.load_dataset("csv" , data_files=A )
UpperCamelCase_ =list(ds[list(files.keys() )[0]].features.keys() )
UpperCamelCase_ =features_name.pop(A )
UpperCamelCase_ =list(set(ds[list(files.keys() )[0]][label_name] ) )
UpperCamelCase_ ={label: i for i, label in enumerate(A )}
UpperCamelCase_ =tokenizer.model_input_names
UpperCamelCase_ ={}
if len(A ) == 1:
for k in files.keys():
UpperCamelCase_ =ds[k].map(
lambda A : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=A , max_length=A , padding="max_length" ) , batched=A , )
elif len(A ) == 2:
for k in files.keys():
UpperCamelCase_ =ds[k].map(
lambda A : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=A , max_length=A , padding="max_length" , ) , batched=A , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
UpperCamelCase_ ={k: v for k, v in ex.items() if k in input_names}
UpperCamelCase_ =labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
UpperCamelCase_ ={k: v for k, v in ex.items() if k in input_names}
UpperCamelCase_ =labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
UpperCamelCase_ ={k: v for k, v in ex.items() if k in input_names}
UpperCamelCase_ =labelaid[ex[label_name]]
yield (d, label)
UpperCamelCase_ =(
tf.data.Dataset.from_generator(
A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
UpperCamelCase_ =train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
UpperCamelCase_ =(
tf.data.Dataset.from_generator(
A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
UpperCamelCase_ =val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
UpperCamelCase_ =(
tf.data.Dataset.from_generator(
A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
UpperCamelCase_ =test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : List[Any] = field(metadata={"help": "Which column contains the label"} )
__lowerCamelCase : Any = field(default=UpperCAmelCase__ , metadata={"help": "The path of the training file"} )
__lowerCamelCase : Any = field(default=UpperCAmelCase__ , metadata={"help": "The path of the development file"} )
__lowerCamelCase : Union[str, Any] = field(default=UpperCAmelCase__ , metadata={"help": "The path of the test file"} )
__lowerCamelCase : Optional[int] = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__lowerCamelCase : Any = field(
default=UpperCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : Tuple = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowerCamelCase : Any = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCamelCase : Tuple = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowerCamelCase : Optional[Any] = field(default=UpperCAmelCase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCamelCase : Any = field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def _UpperCamelCase ( ):
UpperCamelCase_ =HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_ =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
UpperCamelCase_ =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A ) , labelaid=A , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
UpperCamelCase_ =TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
def compute_metrics(A ) -> Dict:
UpperCamelCase_ =np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
UpperCamelCase_ =TFTrainer(
model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase_ ={}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCamelCase_ =trainer.evaluate()
UpperCamelCase_ =os.path.join(training_args.output_dir , "eval_results.txt" )
with open(A , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(A )
return results
if __name__ == "__main__":
main()
| 391 |
import os
import sys
A : Tuple = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
A : Dict = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : List[Any] ) -> str:
"""simple docstring"""
return AutoConfig.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoModel.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase ( *__magic_name__ : Dict , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*__magic_name__ , **__magic_name__ )
| 15 | 0 |
'''simple docstring'''
UpperCAmelCase_ : List[str] = [
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : Optional[Any] = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
_a : Optional[int] = 0
_a : Optional[int] = 0
while place < len(A ):
if (place + 1 < len(A )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : List[Any] = []
for arabic, roman in ROMAN:
((_a) , (_a)) : Dict = divmod(A , A )
result.append(roman * factor )
if number == 0:
break
return "".join(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict="last" , _UpperCAmelCase : Any=None , _UpperCAmelCase : int=None , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_lengths
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = vocab_size
lowercase__ = n_special
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = summary_type
lowercase__ = use_proj
lowercase__ = scope
def lowerCamelCase__ (self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_input_lengths:
lowercase__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((lowercase__) , ) = result_with_labels.to_tuple()
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((lowercase__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = FlaubertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , ) -> str:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = FlaubertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
A__ = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__ (self : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def lowerCamelCase__ (self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = FlaubertModelTester(self )
lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , emb_dim=37 )
def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_UpperCAmelCase )
@slow
def lowerCamelCase__ (self : int ) -> List[Any]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = FlaubertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def lowerCamelCase__ (self : Dict ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ = torch.jit.trace(
_UpperCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """traced_model.pt""" ) )
lowercase__ = torch.jit.load(os.path.join(_UpperCAmelCase , """traced_model.pt""" ) , map_location=_UpperCAmelCase )
loaded(inputs_dict["""input_ids"""].to(_UpperCAmelCase ) , inputs_dict["""attention_mask"""].to(_UpperCAmelCase ) )
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase__ (self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ = model(_UpperCAmelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__ = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 15 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a ( ) -> Optional[Any]:
__magic_name__: Optional[int] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__UpperCAmelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__UpperCAmelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__UpperCAmelCase )
return parser.parse_args()
def a ( ) -> Union[str, Any]:
__magic_name__: List[str] = parse_args()
# Import training_script as a module.
__magic_name__: List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__magic_name__: List[str] = script_fpath.stem
__magic_name__: Tuple = importlib.import_module(__UpperCAmelCase )
# Patch sys.argv
__magic_name__: Optional[int] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 96 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase ( __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
lowercase__ = emb.weight.data
return lin_layer
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str="facebook/mbart-large-en-ro" , __magic_name__ : Any=False , __magic_name__ : int=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = torch.load(__magic_name__ , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(__magic_name__ )
lowercase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
lowercase__ = MBartConfig.from_pretrained(__magic_name__ , vocab_size=__magic_name__ )
if mbart_aa and finetuned:
lowercase__ = """relu"""
lowercase__ = state_dict["""decoder.embed_tokens.weight"""]
lowercase__ = MBartForConditionalGeneration(__magic_name__ )
model.model.load_state_dict(__magic_name__ )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
A : Any = parser.parse_args()
A : str = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 15 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
A : Optional[Any] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
SCREAMING_SNAKE_CASE_ = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCAmelCase , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __A ( self : Dict , __magic_name__ : int , __magic_name__ : int , __magic_name__ : str , __magic_name__ : str=None ) -> int:
SCREAMING_SNAKE_CASE_ = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
SCREAMING_SNAKE_CASE_ = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
SCREAMING_SNAKE_CASE_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
SCREAMING_SNAKE_CASE_ = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = os.path.join(self.diffusers_dir , "new_code.py" )
with open(_UpperCAmelCase , "w" , newline="\n" ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , "r" ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def __A ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE_ = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def __A ( self : Optional[int] ) -> Optional[int]:
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , _UpperCAmelCase ) , )
# Copy consistency with a really long name
SCREAMING_SNAKE_CASE_ = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , _UpperCAmelCase , overwrite_result=re.sub("DDPM" , "Test" , _UpperCAmelCase ) , )
| 140 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase ( __magic_name__ : Any ) -> int:
"""simple docstring"""
lowercase__ = model.config
lowercase__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase__ = MBartConfig(
is_decoder=__magic_name__ , is_encoder_decoder=__magic_name__ , add_cross_attention=__magic_name__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__magic_name__ , add_final_layer_norm=__magic_name__ , )
return encoder_config, decoder_config
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if "encoder.model" in name:
lowercase__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowercase__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowercase__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowercase__ = """encoder.""" + name
if "attn.proj" in name:
lowercase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowercase__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowercase__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowercase__ = """encoder.layernorm.bias"""
return name
def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : str ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
lowercase__ = key.split(""".""" )
lowercase__ = int(key_split[3] )
lowercase__ = int(key_split[5] )
lowercase__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[:dim]
lowercase__ = val[dim : dim * 2]
lowercase__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase__ = val
return orig_state_dict
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : List[Any]=None , __magic_name__ : Dict=False ) -> int:
"""simple docstring"""
lowercase__ = DonutModel.from_pretrained(__magic_name__ ).eval()
# load HuggingFace model
lowercase__ , lowercase__ = get_configs(__magic_name__ )
lowercase__ = DonutSwinModel(__magic_name__ )
lowercase__ = MBartForCausalLM(__magic_name__ )
lowercase__ = VisionEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
model.eval()
lowercase__ = original_model.state_dict()
lowercase__ = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# verify results on scanned document
lowercase__ = load_dataset("""hf-internal-testing/example-documents""" )
lowercase__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowercase__ = XLMRobertaTokenizerFast.from_pretrained(__magic_name__ , from_slow=__magic_name__ )
lowercase__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase__ = DonutProcessor(__magic_name__ , __magic_name__ )
lowercase__ = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowercase__ = """When is the coffee break?"""
lowercase__ = task_prompt.replace("""{user_input}""" , __magic_name__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowercase__ = original_model.decoder.tokenizer(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors="""pt""" )[
"""input_ids"""
]
lowercase__ = original_model.encoder.model.patch_embed(__magic_name__ )
lowercase__ , lowercase__ = model.encoder.embeddings(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
# verify encoder hidden states
lowercase__ = original_model.encoder(__magic_name__ )
lowercase__ = model.encoder(__magic_name__ ).last_hidden_state
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-2 )
# verify decoder hidden states
lowercase__ = original_model(__magic_name__ , __magic_name__ , __magic_name__ ).logits
lowercase__ = model(__magic_name__ , decoder_input_ids=__magic_name__ ).logits
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
A : Optional[int] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 15 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 396 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 15 | 0 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class _snake_case ( unittest.TestCase ):
snake_case__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple ):
__lowerCamelCase : List[Any] = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
__lowerCamelCase : Any = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 )
__lowerCamelCase : Optional[int] = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ):
for example in examples:
__lowerCamelCase : Optional[int] = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
] , )
@require_torch
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : int = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
__lowerCamelCase : Any = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
__lowerCamelCase : List[Any] = pipeline(
"video-classification" , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 )
__lowerCamelCase : List[Any] = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
__lowerCamelCase : str = video_classifier(_UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}] , )
__lowerCamelCase : Dict = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
[{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}],
[{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}],
] , )
@require_tf
def lowerCamelCase__ ( self : Any ):
pass | 646 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Any , _UpperCAmelCase : NestedDataStructureLike[PathLike] , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Optional[Features] = None , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ) -> List[str]:
"""simple docstring"""
super().__init__(
_UpperCAmelCase , split=_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase , data_files=_UpperCAmelCase , features=_UpperCAmelCase , **_UpperCAmelCase , )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , )
lowercase__ = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
| 15 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : List[str] = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __UpperCamelCase ( UpperCAmelCase__ ):
lowercase : Union[str, Any] = 'cvt'
def __init__( self :str ,_UpperCamelCase :str=3 ,_UpperCamelCase :str=[7, 3, 3] ,_UpperCamelCase :Any=[4, 2, 2] ,_UpperCamelCase :Any=[2, 1, 1] ,_UpperCamelCase :Any=[6_4, 1_9_2, 3_8_4] ,_UpperCamelCase :Optional[Any]=[1, 3, 6] ,_UpperCamelCase :Tuple=[1, 2, 1_0] ,_UpperCamelCase :List[str]=[4.0, 4.0, 4.0] ,_UpperCamelCase :Any=[0.0, 0.0, 0.0] ,_UpperCamelCase :Any=[0.0, 0.0, 0.0] ,_UpperCamelCase :Optional[Any]=[0.0, 0.0, 0.1] ,_UpperCamelCase :List[str]=[True, True, True] ,_UpperCamelCase :str=[False, False, True] ,_UpperCamelCase :Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] ,_UpperCamelCase :List[Any]=[3, 3, 3] ,_UpperCamelCase :List[str]=[1, 1, 1] ,_UpperCamelCase :List[Any]=[2, 2, 2] ,_UpperCamelCase :Dict=[1, 1, 1] ,_UpperCamelCase :List[Any]=[1, 1, 1] ,_UpperCamelCase :int=0.02 ,_UpperCamelCase :str=1E-1_2 ,**_UpperCamelCase :Optional[Any] ,):
super().__init__(**_UpperCAmelCase )
snake_case_ : int = num_channels
snake_case_ : List[Any] = patch_sizes
snake_case_ : Union[str, Any] = patch_stride
snake_case_ : List[Any] = patch_padding
snake_case_ : Dict = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Dict = mlp_ratio
snake_case_ : List[str] = attention_drop_rate
snake_case_ : int = drop_rate
snake_case_ : List[str] = drop_path_rate
snake_case_ : List[str] = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : Dict = qkv_projection_method
snake_case_ : Union[str, Any] = kernel_qkv
snake_case_ : int = padding_kv
snake_case_ : Any = stride_kv
snake_case_ : Optional[Any] = padding_q
snake_case_ : List[str] = stride_q
snake_case_ : List[Any] = initializer_range
snake_case_ : Tuple = layer_norm_eps | 334 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = """ylacombe/bark-small"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = """en_speaker_1"""
lowercase__ = """This is a test string"""
lowercase__ = """speaker_embeddings_path.json"""
lowercase__ = """speaker_embeddings"""
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase__ (self : str ) -> Tuple:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase__ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase__ = 35
lowercase__ = 2
lowercase__ = 8
lowercase__ = {
"""semantic_prompt""": np.ones(_UpperCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase__ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
lowercase__ = processor(text=self.input_string )
lowercase__ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 15 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCAmelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : int=None , ):
if attention_mask is None:
lowerCamelCase : Tuple = np.where(input_ids != config.pad_token_id , 1 , 0)
if decoder_attention_mask is None:
lowerCamelCase : Dict = np.where(decoder_input_ids != config.pad_token_id , 1 , 0)
if head_mask is None:
lowerCamelCase : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
lowerCamelCase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
lowerCamelCase : Any = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __snake_case :
def __init__( self, A, A=13, A=7, A=True, A=False, A=99, A=16, A=2, A=4, A=4, A="gelu", A=0.1, A=0.1, A=32, A=2, A=1, A=0, A=0.02, ):
"""simple docstring"""
lowerCamelCase : Dict = parent
lowerCamelCase : List[str] = batch_size
lowerCamelCase : Optional[Any] = seq_length
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : str = use_labels
lowerCamelCase : Union[str, Any] = vocab_size
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : str = num_attention_heads
lowerCamelCase : Optional[int] = intermediate_size
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : int = attention_probs_dropout_prob
lowerCamelCase : Optional[Any] = max_position_embeddings
lowerCamelCase : List[str] = eos_token_id
lowerCamelCase : Union[str, Any] = pad_token_id
lowerCamelCase : Tuple = bos_token_id
lowerCamelCase : Optional[Any] = initializer_range
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ), 3, self.vocab_size )
lowerCamelCase : Any = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.intaa )), -1 )
lowerCamelCase : List[str] = shift_tokens_right(_UpperCAmelCase, 1, 2 )
lowerCamelCase : Optional[int] = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=_UpperCAmelCase, )
lowerCamelCase : Dict = prepare_blenderbot_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return config, inputs_dict
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : str = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase_ ( self, A, A, A ):
"""simple docstring"""
lowerCamelCase : List[Any] = 20
lowerCamelCase : str = model_class_name(_UpperCAmelCase )
lowerCamelCase : Union[str, Any] = model.encode(inputs_dict['input_ids'] )
lowerCamelCase , lowerCamelCase : List[str] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
lowerCamelCase : List[str] = model.init_cache(decoder_input_ids.shape[0], _UpperCAmelCase, _UpperCAmelCase )
lowerCamelCase : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4' )
lowerCamelCase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
lowerCamelCase : int = model.decode(
decoder_input_ids[:, :-1], _UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase, decoder_position_ids=_UpperCAmelCase, )
lowerCamelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4' )
lowerCamelCase : Optional[Any] = model.decode(
decoder_input_ids[:, -1:], _UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase, past_key_values=outputs_cache.past_key_values, decoder_position_ids=_UpperCAmelCase, )
lowerCamelCase : Optional[Any] = model.decode(_UpperCAmelCase, _UpperCAmelCase )
lowerCamelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=F'''Max diff is {diff}''' )
def UpperCAmelCase_ ( self, A, A, A ):
"""simple docstring"""
lowerCamelCase : Dict = 20
lowerCamelCase : Any = model_class_name(_UpperCAmelCase )
lowerCamelCase : Any = model.encode(inputs_dict['input_ids'] )
lowerCamelCase , lowerCamelCase : str = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
lowerCamelCase : List[str] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
], axis=-1, )
lowerCamelCase : Any = model.init_cache(decoder_input_ids.shape[0], _UpperCAmelCase, _UpperCAmelCase )
lowerCamelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
lowerCamelCase : int = model.decode(
decoder_input_ids[:, :-1], _UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase, decoder_position_ids=_UpperCAmelCase, )
lowerCamelCase : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4' )
lowerCamelCase : Tuple = model.decode(
decoder_input_ids[:, -1:], _UpperCAmelCase, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=_UpperCAmelCase, decoder_position_ids=_UpperCAmelCase, )
lowerCamelCase : List[str] = model.decode(_UpperCAmelCase, _UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase )
lowerCamelCase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=F'''Max diff is {diff}''' )
@require_flax
class __snake_case ( unittest.TestCase):
_lowerCAmelCase = 99
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
], dtype=np.intaa, )
lowerCamelCase : Optional[Any] = input_ids.shape[0]
lowerCamelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._get_config_and_data()
lowerCamelCase : str = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
lowerCamelCase : Optional[Any] = lm_model(input_ids=_UpperCAmelCase )
lowerCamelCase : List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape, _UpperCAmelCase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
lowerCamelCase : int = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
lowerCamelCase : Optional[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.intaa )
lowerCamelCase : int = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.intaa )
lowerCamelCase : int = lm_model(input_ids=_UpperCAmelCase, decoder_input_ids=_UpperCAmelCase )
lowerCamelCase : Dict = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape, _UpperCAmelCase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Dict = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.intaa )
lowerCamelCase : str = shift_tokens_right(_UpperCAmelCase, 1, 2 )
lowerCamelCase : Union[str, Any] = np.equal(_UpperCAmelCase, 1 ).astype(np.floataa ).sum()
lowerCamelCase : Tuple = np.equal(_UpperCAmelCase, 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape, input_ids.shape )
self.assertEqual(_UpperCAmelCase, n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0], 2 ).all() )
@require_flax
class __snake_case ( UpperCAmelCase__ , unittest.TestCase , UpperCAmelCase__):
_lowerCAmelCase = True
_lowerCAmelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_lowerCAmelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = FlaxBlenderbotModelTester(self )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
lowerCamelCase : Dict = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(A, A=None, **A ):
return model.encode(input_ids=_UpperCAmelCase, attention_mask=_UpperCAmelCase )
with self.subTest('JIT Enabled' ):
lowerCamelCase : Optional[int] = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase : Optional[int] = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase, _UpperCAmelCase ):
self.assertEqual(jitted_output.shape, output.shape )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase : Optional[Any] = model_class(_UpperCAmelCase )
lowerCamelCase : Any = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'] )
lowerCamelCase : Optional[int] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(A, A, A ):
return model.decode(
decoder_input_ids=_UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase, encoder_outputs=_UpperCAmelCase, )
with self.subTest('JIT Enabled' ):
lowerCamelCase : Dict = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase : int = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase, _UpperCAmelCase ):
self.assertEqual(jitted_output.shape, output.shape )
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase : str = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase : Union[str, Any] = np.ones((1, 1) ) * model.config.eos_token_id
lowerCamelCase : Dict = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skipUnless(jax_device != 'cpu', '3B test too slow on CPU.' )
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : int = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
lowerCamelCase : Any = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
lowerCamelCase : List[Any] = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=_UpperCAmelCase )
lowerCamelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
lowerCamelCase : Tuple = ['Sam']
lowerCamelCase : Union[str, Any] = tokenizer(_UpperCAmelCase, return_tensors='jax' )
lowerCamelCase : str = model.generate(**_UpperCAmelCase, **_UpperCAmelCase )
lowerCamelCase : List[Any] = 'Sam is a great name. It means \"sun\" in Gaelic.'
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(_UpperCAmelCase, **_UpperCAmelCase )
assert generated_txt[0].strip() == tgt_text
| 320 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
A : Optional[Any] = logging.get_logger(__name__)
A : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Optional[int] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : int = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
A : Optional[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
A : Any = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
A : Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A : Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRContextEncoderTokenizer
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRQuestionEncoderTokenizer
A : Any = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A : Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A : int = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(UpperCAmelCase__ )
class A :
'''simple docstring'''
def __call__(self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[bool] = None , **_UpperCAmelCase : Any , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
elif titles is None or texts is None:
lowercase__ = titles if texts is None else texts
return super().__call__(
_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = titles if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [titles]
lowercase__ = texts if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [texts]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = questions if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [questions] * n_passages
assert len(_UpperCAmelCase ) == len(
_UpperCAmelCase ), f'''There should be as many titles than texts but got {len(_UpperCAmelCase )} titles and {len(_UpperCAmelCase )} texts.'''
lowercase__ = super().__call__(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = super().__call__(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_UpperCAmelCase , _UpperCAmelCase )
]
}
if return_attention_mask is not False:
lowercase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ = attention_mask
return self.pad(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : BatchEncoding , _UpperCAmelCase : DPRReaderOutput , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = reader_input["""input_ids"""]
lowercase__ , lowercase__ , lowercase__ = reader_output[:3]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = sorted(range(_UpperCAmelCase ) , reverse=_UpperCAmelCase , key=relevance_logits.__getitem__ )
lowercase__ = []
for doc_id in sorted_docs:
lowercase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ = sequence_ids.index(self.pad_token_id )
else:
lowercase__ = len(_UpperCAmelCase )
lowercase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCAmelCase , top_spans=_UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCAmelCase , start_index=_UpperCAmelCase , end_index=_UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : int , _UpperCAmelCase : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = []
for start_index, start_score in enumerate(_UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase__ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
lowercase__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowercase__ = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class A ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = READER_PRETRAINED_VOCAB_FILES_MAP
A__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = READER_PRETRAINED_INIT_CONFIGURATION
A__ = ['''input_ids''', '''attention_mask''']
A__ = DPRReaderTokenizer
| 15 | 0 |
import doctest
from collections import deque
import numpy as np
class __lowerCAmelCase :
def __init__( self ) -> None:
'''simple docstring'''
_lowercase =[2, 1, 2, -1]
_lowercase =[1, 2, 3, 4]
def A__ ( self ) -> list[float]:
'''simple docstring'''
_lowercase =len(self.first_signal )
_lowercase =len(self.second_signal )
_lowercase =max(_UpperCAmelCase , _UpperCAmelCase )
# create a zero matrix of max_length x max_length
_lowercase =[[0] * max_length for i in range(_UpperCAmelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(_UpperCAmelCase ):
_lowercase =deque(self.second_signal )
rotated_signal.rotate(_UpperCAmelCase )
for j, item in enumerate(_UpperCAmelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
_lowercase =np.matmul(np.transpose(_UpperCAmelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(_UpperCAmelCase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 291 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[int] ) -> list[int]: # This function is recursive
"""simple docstring"""
lowercase__ = len(__magic_name__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ = array[0]
lowercase__ = False
lowercase__ = 1
lowercase__ = []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ = True
lowercase__ = [element for element in array[i:] if element >= array[i]]
lowercase__ = longest_subsequence(__magic_name__ )
if len(__magic_name__ ) > len(__magic_name__ ):
lowercase__ = temp_array
else:
i += 1
lowercase__ = [element for element in array[1:] if element >= pivot]
lowercase__ = [pivot, *longest_subsequence(__magic_name__ )]
if len(__magic_name__ ) > len(__magic_name__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
'''simple docstring'''
def __UpperCamelCase ( lowercase_ : int , lowercase_ : int ):
"""simple docstring"""
a_ = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
a_ = n - k
# Calculate C(n,k)
for i in range(lowercase_ ):
result *= n - i
result //= i + 1
return result
def __UpperCamelCase ( lowercase_ : int ):
"""simple docstring"""
return binomial_coefficient(2 * node_count , lowercase_ ) // (node_count + 1)
def __UpperCamelCase ( lowercase_ : int ):
"""simple docstring"""
if n < 0:
raise ValueError('factorial() not defined for negative values' )
a_ = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __UpperCamelCase ( lowercase_ : int ):
"""simple docstring"""
return catalan_number(lowercase_ ) * factorial(lowercase_ )
if __name__ == "__main__":
__lowerCAmelCase = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 536 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A : List[Any] = None
A : Optional[Any] = logging.get_logger(__name__)
A : str = '▁'
A : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
A : Optional[int] = {
'google/pegasus-xsum': 5_1_2,
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = PegasusTokenizer
A__ = ['''input_ids''', '''attention_mask''']
def __init__(self : Tuple , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Union[str, Any]="<pad>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : Optional[Any]="<mask_2>" , _UpperCAmelCase : Optional[Any]="<mask_1>" , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Dict=103 , **_UpperCAmelCase : Dict , ) -> Dict:
"""simple docstring"""
lowercase__ = offset
if additional_special_tokens is not None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_UpperCAmelCase )}, but is'''
f''' {type(_UpperCAmelCase )}''' )
lowercase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_UpperCAmelCase ) , self.offset - 1 )
]
if len(set(_UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , pad_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , mask_token_sent=_UpperCAmelCase , offset=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List , _UpperCAmelCase : Optional[List] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(_UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase__ (self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ (self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
| 15 | 0 |
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : List[str] = 1
for i in range(1 ,num + 1 ):
fact *= i
return fact
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : int = 0
while number > 0:
A_ : List[str] = number % 10
sum_of_digits += last_digit
A_ : int = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def UpperCamelCase ( __lowercase : int = 1_00 ):
'''simple docstring'''
A_ : List[str] = factorial(__lowercase )
A_ : Any = split_and_add(__lowercase )
return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 558 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A : List[str] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
A : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int = CHRF.CHAR_ORDER , _UpperCAmelCase : int = CHRF.WORD_ORDER , _UpperCAmelCase : int = CHRF.BETA , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , ) -> int:
"""simple docstring"""
lowercase__ = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowercase__ = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
lowercase__ = CHRF(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__ = sb_chrf.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 15 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A_ = ['small', 'medium', 'large']
A_ = 'lm_head.decoder.weight'
A_ = 'lm_head.weight'
def _UpperCamelCase ( A , A ):
UpperCamelCase_ =torch.load(A )
UpperCamelCase_ =d.pop(A )
os.makedirs(A , exist_ok=A )
torch.save(A , os.path.join(A , A ) )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
A_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A_ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
A_ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 391 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def lowerCamelCase__ (self : Tuple , **_UpperCAmelCase : int ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = """</s>"""
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_UpperCAmelCase ) , 1103 )
def lowerCamelCase__ (self : str ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowercase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowercase__ = """To ensure a smooth flow of bank resolutions."""
lowercase__ = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase__ (self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase , offset=0 , mask_token_sent=_UpperCAmelCase , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Any ) -> str:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Union[str, Any] ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_torch
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowercase__ = self._large_tokenizer(_UpperCAmelCase ).input_ids
self.assertListEqual(
_UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 15 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class a ( UpperCAmelCase__ ):
'''simple docstring'''
__lowerCAmelCase : List[Any] = """xlm-roberta"""
def __init__( self , lowerCamelCase_=3_0_5_2_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=2 , lowerCamelCase_="absolute" , lowerCamelCase_=True , lowerCamelCase_=None , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_a : List[str] = vocab_size
_a : Union[str, Any] = hidden_size
_a : Optional[Any] = num_hidden_layers
_a : int = num_attention_heads
_a : Tuple = hidden_act
_a : List[Any] = intermediate_size
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : Union[str, Any] = max_position_embeddings
_a : Optional[int] = type_vocab_size
_a : Optional[Any] = initializer_range
_a : Optional[int] = layer_norm_eps
_a : Tuple = position_embedding_type
_a : List[Any] = use_cache
_a : Tuple = classifier_dropout
class a ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_a : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 120 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase__ = load_file(__magic_name__ )
lowercase__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.text_encoder
else:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.unet
# find the target layer
lowercase__ = layer_infos.pop(0 )
while len(__magic_name__ ) > -1:
try:
lowercase__ = curr_layer.__getattr__(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase__ = layer_infos.pop(0 )
elif len(__magic_name__ ) == 0:
break
except Exception:
if len(__magic_name__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase__ = layer_infos.pop(0 )
lowercase__ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(__magic_name__ )
else:
pair_keys.append(__magic_name__ )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase__ = state_dict[pair_keys[0]].to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ )
# update visited list
for item in pair_keys:
visited.append(__magic_name__ )
return pipeline
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
A : str = parser.parse_args()
A : Tuple = args.base_model_path
A : List[str] = args.checkpoint_path
A : Optional[int] = args.dump_path
A : Optional[int] = args.lora_prefix_unet
A : Any = args.lora_prefix_text_encoder
A : Any = args.alpha
A : List[str] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
A : int = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 15 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowerCamelCase = None
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCamelCase = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
__lowerCamelCase = {
'google/rembert': 2_56,
}
__lowerCamelCase = '▁'
class __A ( UpperCAmelCase__ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = RemBertTokenizer
def __init__( self : List[Any] , __snake_case : int=None , __snake_case : List[Any]=None , __snake_case : Union[str, Any]=True , __snake_case : Any=True , __snake_case : Any=False , __snake_case : int="[CLS]" , __snake_case : Dict="[SEP]" , __snake_case : Optional[int]="<unk>" , __snake_case : int="[SEP]" , __snake_case : int="<pad>" , __snake_case : str="[CLS]" , __snake_case : Union[str, Any]="[MASK]" , **__snake_case : int , ) -> Dict:
__magic_name__: Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
__magic_name__: Optional[int] = do_lower_case
__magic_name__: Any = remove_space
__magic_name__: Optional[Any] = keep_accents
__magic_name__: List[str] = vocab_file
__magic_name__: Dict = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
__magic_name__: Optional[int] = [self.sep_token_id]
__magic_name__: Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
__magic_name__: Optional[int] = [self.sep_token_id]
__magic_name__: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCAmelCase ):
logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCAmelCase ) )
return
__magic_name__: List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
| 96 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Tuple = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''ibert'''
def __init__(self : int , _UpperCAmelCase : Union[str, Any]=3_0522 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[Any]=3072 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=1E-1_2 , _UpperCAmelCase : int=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : int="absolute" , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]="none" , **_UpperCAmelCase : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = quant_mode
lowercase__ = force_dequant
class A ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def lowerCamelCase__ (self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 15 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase (UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : Dict , *__magic_name__ : List[str] , **__magic_name__ : List[str] ) -> None:
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 140 |
from math import log
from scipy.constants import Boltzmann, physical_constants
A : Any = 3_0_0 # TEMPERATURE (unit = K)
def UpperCamelCase ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCamelCase : Optional[Any] = 16
_UpperCamelCase : Optional[Any] = 32
def __snake_case ( lowerCAmelCase : Accelerator , lowerCAmelCase : DatasetDict , lowerCAmelCase : List[int] , lowerCAmelCase : List[int] , lowerCAmelCase : int = 16 ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
__UpperCAmelCase = DatasetDict(
{
'train': dataset['train'].select(lowerCAmelCase ),
'validation': dataset['train'].select(lowerCAmelCase ),
'test': dataset['validation'],
} )
def tokenize_function(lowerCAmelCase : Tuple ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCAmelCase , max_length=lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCAmelCase = datasets.map(
lowerCAmelCase , batched=lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCAmelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCAmelCase : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
__UpperCAmelCase = 8
else:
__UpperCAmelCase = None
return tokenizer.pad(
lowerCAmelCase , padding='longest' , max_length=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
__UpperCAmelCase = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=lowerCAmelCase )
__UpperCAmelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=lowerCAmelCase )
__UpperCAmelCase = DataLoader(
tokenized_datasets['test'] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=lowerCAmelCase )
return train_dataloader, eval_dataloader, test_dataloader
def __snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] ):
__UpperCAmelCase = []
# Download the dataset
__UpperCAmelCase = load_dataset('glue' , 'mrpc' )
# Create our splits
__UpperCAmelCase = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
__UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase = config['lr']
__UpperCAmelCase = int(config['num_epochs'] )
__UpperCAmelCase = int(config['seed'] )
__UpperCAmelCase = int(config['batch_size'] )
__UpperCAmelCase = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
__UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
__UpperCAmelCase = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase )
# New Code #
# Create our folds:
__UpperCAmelCase = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
__UpperCAmelCase = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_fold_dataloaders(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
__UpperCAmelCase = AdamW(params=model.parameters() , lr=lowerCAmelCase )
# Instantiate scheduler
__UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Now we train the model
for epoch in range(lowerCAmelCase ):
model.train()
for step, batch in enumerate(lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCAmelCase = model(**lowerCAmelCase )
__UpperCAmelCase = outputs.loss
__UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase = model(**lowerCAmelCase )
__UpperCAmelCase = outputs.logits.argmax(dim=-1 )
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowerCAmelCase , references=lowerCAmelCase , )
__UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase )
# New Code #
# We also run predictions on the test set at the very end
__UpperCAmelCase = []
for step, batch in enumerate(lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase = model(**lowerCAmelCase )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowerCAmelCase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
__UpperCAmelCase = torch.cat(lowerCAmelCase , dim=0 )
__UpperCAmelCase = torch.stack(lowerCAmelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
__UpperCAmelCase = metric.compute(predictions=lowerCAmelCase , references=lowerCAmelCase )
accelerator.print('Average test metrics from all folds:' , lowerCAmelCase )
def __snake_case ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=lowerCAmelCase , default=lowerCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=lowerCAmelCase , default=3 , help='The number of splits to perform across the dataset' )
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
main()
| 396 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A :
'''simple docstring'''
A__ = 42
A__ = None
A__ = None
def UpperCamelCase ( ) -> Node | None:
"""simple docstring"""
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(__magic_name__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__magic_name__ , __magic_name__ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(__magic_name__ , __magic_name__ ) )
lowercase__ = 0
return output
def UpperCamelCase ( ) -> None: # Main function for testing.
"""simple docstring"""
lowercase__ = make_tree()
print(f'''In-order Traversal: {inorder(__magic_name__ )}''' )
print(f'''Pre-order Traversal: {preorder(__magic_name__ )}''' )
print(f'''Post-order Traversal: {postorder(__magic_name__ )}''' , """\n""" )
print(f'''Height of Tree: {height(__magic_name__ )}''' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(__magic_name__ ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(__magic_name__ ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(__magic_name__ , level=__magic_name__ ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 15 | 0 |
"""simple docstring"""
def lowercase_ ( _lowerCamelCase: Optional[Any] , _lowerCamelCase: Tuple ) -> int:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = ""
for i in table:
res += inp[i - 1]
return res
def lowercase_ ( _lowerCamelCase: int ) -> Optional[int]:
'''simple docstring'''
return data[1:] + data[0]
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: List[str] ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase : List[str] = ""
for i in range(len(_lowerCamelCase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowercase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: Dict ) -> Dict:
'''simple docstring'''
__lowerCamelCase : Optional[int] = int("0b" + data[0] + data[-1] , 2 )
__lowerCamelCase : int = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def lowercase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: Tuple , _lowerCamelCase: List[Any] , _lowerCamelCase: Any , _lowerCamelCase: List[str] ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase : Tuple = message[:4]
__lowerCamelCase : str = message[4:]
__lowerCamelCase : Union[str, Any] = apply_table(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Dict = xor(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Union[str, Any] = apply_sbox(_lowerCamelCase , temp[:4] ) # noqa: E741
__lowerCamelCase : Optional[Any] = apply_sbox(_lowerCamelCase , temp[4:] )
__lowerCamelCase : Dict = "0" * (2 - len(_lowerCamelCase )) + l # noqa: E741
__lowerCamelCase : Tuple = "0" * (2 - len(_lowerCamelCase )) + r
__lowerCamelCase : int = apply_table(l + r , _lowerCamelCase )
__lowerCamelCase : List[Any] = xor(_lowerCamelCase , _lowerCamelCase )
return temp + right
if __name__ == "__main__":
__A = input('''Enter 10 bit key: ''')
__A = input('''Enter 8 bit message: ''')
__A = [6, 3, 7, 4, 8, 5, 10, 9]
__A = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__A = [2, 4, 3, 1]
__A = [2, 6, 3, 1, 4, 8, 5, 7]
__A = [4, 1, 3, 5, 7, 2, 8, 6]
__A = [4, 1, 2, 3, 2, 3, 4, 1]
__A = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__A = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__A = apply_table(key, paa_table)
__A = temp[:5]
__A = temp[5:]
__A = left_shift(left)
__A = left_shift(right)
__A = apply_table(left + right, pa_table)
__A = left_shift(left)
__A = left_shift(right)
__A = left_shift(left)
__A = left_shift(right)
__A = apply_table(left + right, pa_table)
# encryption
__A = apply_table(message, IP)
__A = function(expansion, sa, sa, keya, temp)
__A = temp[4:] + temp[:4]
__A = function(expansion, sa, sa, keya, temp)
__A = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
__A = apply_table(CT, IP)
__A = function(expansion, sa, sa, keya, temp)
__A = temp[4:] + temp[:4]
__A = function(expansion, sa, sa, keya, temp)
__A = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT) | 646 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Tuple = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''poolformer'''
def __init__(self : Dict , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : str=16 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Union[str, Any]=4.0 , _UpperCAmelCase : str=[2, 2, 6, 2] , _UpperCAmelCase : int=[64, 128, 320, 512] , _UpperCAmelCase : Union[str, Any]=[7, 3, 3, 3] , _UpperCAmelCase : List[Any]=[4, 2, 2, 2] , _UpperCAmelCase : Union[str, Any]=[2, 1, 1, 1] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=1E-5 , _UpperCAmelCase : Tuple=0.02 , **_UpperCAmelCase : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = stride
lowercase__ = padding
lowercase__ = pool_size
lowercase__ = hidden_sizes
lowercase__ = mlp_ratio
lowercase__ = depths
lowercase__ = patch_sizes
lowercase__ = strides
lowercase__ = num_encoder_blocks
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_layer_scale
lowercase__ = layer_scale_init_value
lowercase__ = initializer_range
super().__init__(**_UpperCAmelCase )
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = version.parse('''1.11''' )
@property
def lowerCamelCase__ (self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ (self : Dict ) -> float:
"""simple docstring"""
return 2E-3
| 15 | 0 |
'''simple docstring'''
def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :int ):
'''simple docstring'''
snake_case_ : List[str] = word.split()
def justify(lowerCamelCase_ :list , lowerCamelCase_ :int , lowerCamelCase_ :int ) -> str:
snake_case_ : Optional[int] = max_width - width
snake_case_ : Dict = len(lowerCamelCase_ )
if len(lowerCamelCase_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
snake_case_ : Optional[int] = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
snake_case_ : Optional[Any] = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
snake_case_ : Optional[Any] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(lowerCamelCase_ ):
num_spaces_between_words_list[i] += 1
snake_case_ : Dict = []
for i in range(lowerCamelCase_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(lowerCamelCase_ )
snake_case_ : Optional[Any] = []
snake_case_ : Optional[int] = []
snake_case_ : Any = 0
for word in words:
if width + len(lowerCamelCase_ ) + len(lowerCamelCase_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(lowerCamelCase_ )
width += len(lowerCamelCase_ )
else:
# justify the line and add it to result
answer.append(justify(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) )
# reset new line and new width
snake_case_ , snake_case_ : Optional[int] = [word], len(lowerCamelCase_ )
snake_case_ : str = max_width - width - len(lowerCamelCase_ )
answer.append(""" """.join(lowerCamelCase_ ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod() | 334 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class A ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
@require_torch
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
lowercase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowercase__ = bertabert.config.encoder.vocab_size
lowercase__ = tokenizer.sep_token_id
lowercase__ = tokenizer.cls_token_id
lowercase__ = 128
lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
lowercase__ = train_dataset.select(range(32 ) )
lowercase__ = val_dataset.select(range(16 ) )
lowercase__ = 4
def _map_to_encoder_decoder_inputs(_UpperCAmelCase : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_UpperCAmelCase , max_length=512 )
lowercase__ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_UpperCAmelCase , max_length=128 )
lowercase__ = inputs.input_ids
lowercase__ = inputs.attention_mask
lowercase__ = outputs.input_ids
lowercase__ = outputs.input_ids.copy()
lowercase__ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
lowercase__ = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_UpperCAmelCase : List[Any] ):
lowercase__ = pred.label_ids
lowercase__ = pred.predictions
# all unnecessary tokens are removed
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowercase__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
lowercase__ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
lowercase__ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy="""steps""" , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
| 15 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class __snake_case ( unittest.TestCase):
def UpperCAmelCase_ ( self, A, A, A = None, A = None ):
"""simple docstring"""
lowerCamelCase : List[str] = None
lowerCamelCase : Union[str, Any] = os.path.abspath(os.path.join('examples', 'by_feature' ) )
lowerCamelCase : Dict = os.path.abspath('examples' )
for item in os.listdir(_UpperCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase : Union[str, Any] = os.path.join(_UpperCAmelCase, _UpperCAmelCase )
if os.path.isfile(_UpperCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=_UpperCAmelCase, feature_script=_UpperCAmelCase, tested_section='main()' if parser_only else 'training_function()', ):
lowerCamelCase : Optional[int] = compare_against_test(
os.path.join(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowerCamelCase : int = '\n'.join(_UpperCAmelCase )
if special_strings is not None:
for string in special_strings:
lowerCamelCase : Optional[Any] = diff.replace(_UpperCAmelCase, '' )
self.assertEqual(_UpperCAmelCase, '' )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.one_complete_example('complete_nlp_example.py', _UpperCAmelCase )
self.one_complete_example('complete_nlp_example.py', _UpperCAmelCase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Any = os.path.abspath(os.path.join('examples', 'cv_example.py' ) )
lowerCamelCase : str = [
' ' * 16 + '{\n\n',
' ' * 20 + '\"accuracy\": eval_metric[\"accuracy\"],\n\n',
' ' * 20 + '\"f1\": eval_metric[\"f1\"],\n\n',
' ' * 20 + '\"train_loss\": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '\"epoch\": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py', _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
self.one_complete_example('complete_cv_example.py', _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''})
class __snake_case ( UpperCAmelCase__):
_lowerCAmelCase = False
@classmethod
def UpperCAmelCase_ ( cls ):
"""simple docstring"""
super().setUpClass()
lowerCamelCase : Any = tempfile.mkdtemp()
lowerCamelCase : List[Any] = os.path.join(cls._tmpdir, 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase : str = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def UpperCAmelCase_ ( cls ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0' ) ) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
lowerCamelCase : Any = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2' ) ) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0' )}
'''.split()
lowerCamelCase : List[str] = run_command(self._launch_args + testargs, return_stdout=_UpperCAmelCase )
self.assertNotIn('epoch 0:', _UpperCAmelCase )
self.assertIn('epoch 1:', _UpperCAmelCase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2' )}
'''.split()
lowerCamelCase : Optional[int] = run_command(self._launch_args + testargs, return_stdout=_UpperCAmelCase )
if torch.cuda.is_available():
lowerCamelCase : Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase : int = 1
if num_processes > 1:
self.assertNotIn('epoch 0:', _UpperCAmelCase )
self.assertIn('epoch 1:', _UpperCAmelCase )
else:
self.assertIn('epoch 0:', _UpperCAmelCase )
self.assertIn('epoch 1:', _UpperCAmelCase )
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'} ):
lowerCamelCase : Tuple = run_command(self._launch_args + testargs, return_stdout=_UpperCAmelCase )
lowerCamelCase : str = re.findall('({.+})', _UpperCAmelCase )
lowerCamelCase : Dict = [r for r in results if 'accuracy' in r][-1]
lowerCamelCase : Any = ast.literal_eval(_UpperCAmelCase )
self.assertGreaterEqual(results['accuracy'], 0.75 )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : str = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'} )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase : Optional[int] = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase, 'tracking' ) ) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Any = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
| 320 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : Union[str, Any] = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ['ConvNextFeatureExtractor']
A : Optional[Any] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
A : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 15 | 0 |
from __future__ import annotations
lowercase_ = 'Muhammad Umer Farooq'
lowercase_ = 'MIT'
lowercase_ = '1.0.0'
lowercase_ = 'Muhammad Umer Farooq'
lowercase_ = '[email protected]'
lowercase_ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __lowerCAmelCase ( UpperCAmelCase__ ):
def __init__( self , lowerCAmelCase ) -> None:
'''simple docstring'''
super().__init__()
_lowercase =[]
_lowercase =domain
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_lowercase =parse.urljoin(self.domain , _UpperCAmelCase )
self.urls.append(_UpperCAmelCase )
def a ( A__ : str ) -> str:
"""simple docstring"""
return ".".join(get_sub_domain_name(A__ ).split('.' )[-2:] )
def a ( A__ : str ) -> str:
"""simple docstring"""
return parse.urlparse(A__ ).netloc
def a ( A__ : str = "https://github.com" ) -> list[str]:
"""simple docstring"""
_lowercase =get_domain_name(A__ )
# Initialize the parser
_lowercase =Parser(A__ )
try:
# Open URL
_lowercase =requests.get(A__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_lowercase =set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_lowercase =requests.get(A__ )
# Get the valid email.
_lowercase =re.findall('[a-zA-Z0-9]+@' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(A__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(A__ )
if __name__ == "__main__":
lowercase_ = emails_from_url('https://github.com')
print(f"{len(emails)} emails found:")
print('\n'.join(sorted(emails)))
| 291 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : List[str]=7 ) -> Dict:
"""simple docstring"""
lowercase__ = None
if token is not None:
lowercase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
lowercase__ = """636036"""
lowercase__ = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
lowercase__ = requests.get(__magic_name__ , headers=__magic_name__ ).json()
return result["workflow_runs"]
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
lowercase__ = get_daily_ci_runs(__magic_name__ )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run["""id"""]
break
return workflow_run_id
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
lowercase__ = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=__magic_name__ , token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ , artifact_url=__magic_name__ , output_dir=__magic_name__ , token=__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(__magic_name__ , f'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
lowercase__ = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
lowercase__ = f.read().decode("""UTF-8""" )
return results
| 15 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class __SCREAMING_SNAKE_CASE (UpperCAmelCase__ ):
"""simple docstring"""
_a : List[str] = '''ibert'''
def __init__( self , UpperCamelCase__=30_522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3_072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=False , UpperCamelCase__="none" , **UpperCamelCase__ , ):
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = quant_mode
a_ = force_dequant
class __SCREAMING_SNAKE_CASE (UpperCAmelCase__ ):
"""simple docstring"""
@property
def _a ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
a_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 536 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A ( unittest.TestCase ):
'''simple docstring'''
A__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 )
lowercase__ = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
] , )
@require_torch
def lowerCamelCase__ (self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
lowercase__ = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
lowercase__ = pipeline(
"""video-classification""" , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase__ = video_classifier(_UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def lowerCamelCase__ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
| 15 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ = ['''image_processor''', '''tokenizer''']
lowerCamelCase_ = '''BlipImageProcessor'''
lowerCamelCase_ = '''AutoTokenizer'''
def __init__( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
# add QFormer tokenizer
A_ : List[str] = qformer_tokenizer
def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
A_ : Dict = BatchFeature()
if text is not None:
A_ : List[Any] = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
encoding.update(_UpperCAmelCase )
A_ : Union[str, Any] = self.qformer_tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
A_ : Optional[Any] = qformer_text_encoding.pop('input_ids' )
A_ : Optional[Any] = qformer_text_encoding.pop('attention_mask' )
if images is not None:
A_ : Any = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
encoding.update(_UpperCAmelCase )
return encoding
def lowerCAmelCase_ ( self , *lowercase , **lowercase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self , *lowercase , **lowercase ):
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.tokenizer.model_input_names
A_ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
if os.path.isfile(_UpperCAmelCase ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
A_ : Dict = os.path.join(_UpperCAmelCase , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(_UpperCAmelCase )
return super().save_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls , lowercase , **lowercase ):
"""simple docstring"""
A_ : int = AutoTokenizer.from_pretrained(_UpperCAmelCase , subfolder='qformer_tokenizer' )
A_ : str = cls._get_arguments_from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
args.append(_UpperCAmelCase )
return cls(*_UpperCAmelCase )
| 558 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Optional[Any] = 1_6
A : str = 3_2
def UpperCamelCase ( __magic_name__ : Accelerator , __magic_name__ : int = 16 ) -> List[str]:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__magic_name__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__magic_name__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
__magic_name__ , padding="""longest""" , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
lowercase__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : Union[str, Any] = mocked_dataloaders # noqa: F811
def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __magic_name__ ) == "1":
lowercase__ = 2
# New Code #
lowercase__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__magic_name__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["""lr"""]
lowercase__ = int(config["""num_epochs"""] )
lowercase__ = int(config["""seed"""] )
lowercase__ = int(config["""batch_size"""] )
lowercase__ = evaluate.load("""glue""" , """mrpc""" )
set_seed(__magic_name__ )
lowercase__ , lowercase__ = get_dataloaders(__magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=__magic_name__ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__magic_name__ ):
lowercase__ = model(**__magic_name__ )
lowercase__ = output.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**__magic_name__ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __magic_name__ )
def UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__magic_name__ , default=__magic_name__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__magic_name__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase__ = parser.parse_args()
lowercase__ = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 15 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: int=18 , UpperCamelCase_: Union[str, Any]=30 , UpperCamelCase_: Optional[Any]=400 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: List[Any]=[0.5, 0.5, 0.5] , UpperCamelCase_: List[str]=[0.5, 0.5, 0.5] , ):
UpperCamelCase_ =size if size is not None else {"height": 18, "width": 18}
UpperCamelCase_ =parent
UpperCamelCase_ =batch_size
UpperCamelCase_ =num_channels
UpperCamelCase_ =image_size
UpperCamelCase_ =min_resolution
UpperCamelCase_ =max_resolution
UpperCamelCase_ =do_resize
UpperCamelCase_ =size
UpperCamelCase_ =do_normalize
UpperCamelCase_ =image_mean
UpperCamelCase_ =image_std
def UpperCamelCase__ ( self: Optional[int] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = DPTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =DPTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCamelCase_ =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ =image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
UpperCamelCase_ =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ =image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
UpperCamelCase_ =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCamelCase_ =image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 391 |
import os
import sys
A : Tuple = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
A : Dict = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : List[Any] ) -> str:
"""simple docstring"""
return AutoConfig.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoModel.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase ( *__magic_name__ : Dict , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*__magic_name__ , **__magic_name__ )
| 15 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=9_9 , lowerCamelCase_=0 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_="last" , lowerCamelCase_=None , lowerCamelCase_=None , ) -> int:
_a : Tuple = parent
_a : Any = batch_size
_a : Tuple = seq_length
_a : Optional[Any] = is_training
_a : List[Any] = use_input_lengths
_a : Optional[Any] = use_token_type_ids
_a : int = use_labels
_a : int = gelu_activation
_a : int = sinusoidal_embeddings
_a : Optional[Any] = causal
_a : List[str] = asm
_a : Optional[Any] = n_langs
_a : List[str] = vocab_size
_a : Tuple = n_special
_a : Dict = hidden_size
_a : str = num_hidden_layers
_a : Any = num_attention_heads
_a : Optional[Any] = hidden_dropout_prob
_a : List[str] = attention_probs_dropout_prob
_a : List[str] = max_position_embeddings
_a : Optional[Any] = type_vocab_size
_a : Optional[Any] = type_sequence_label_size
_a : Dict = initializer_range
_a : Any = num_labels
_a : str = num_choices
_a : str = summary_type
_a : Optional[Any] = use_proj
_a : Optional[Any] = scope
def __UpperCamelCase ( self ) -> List[str]:
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_a : Optional[Any] = None
if self.use_input_lengths:
_a : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_a : str = None
if self.use_token_type_ids:
_a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_a : List[Any] = None
_a : Tuple = None
_a : List[str] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : str = ids_tensor([self.batch_size] , 2 ).float()
_a : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_a : str = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __UpperCamelCase ( self ) -> Dict:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]:
_a : int = FlaubertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : int = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
_a : Optional[int] = model(_UpperCAmelCase , langs=_UpperCAmelCase )
_a : Dict = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[int]:
_a : int = FlaubertWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Any = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]:
_a : int = FlaubertForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Dict = model(_UpperCAmelCase )
_a : str = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[Any]:
_a : Dict = FlaubertForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : str = model(_UpperCAmelCase )
_a : Tuple = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
_a : List[str] = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((_a ) , ) : Optional[int] = result_with_labels.to_tuple()
_a : Optional[Any] = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((_a ) , ) : Optional[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]:
_a : Optional[int] = FlaubertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Optional[Any] = model(_UpperCAmelCase )
_a : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> str:
_a : Dict = self.num_labels
_a : List[str] = FlaubertForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]:
_a : int = self.num_choices
_a : List[Any] = FlaubertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Dict = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self ) -> Optional[Any]:
_a : Optional[int] = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) : Union[str, Any] = config_and_inputs
_a : Union[str, Any] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : str = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> Optional[int]:
_a : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_a : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
_a : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def __UpperCamelCase ( self ) -> Union[str, Any]:
_a : Union[str, Any] = FlaubertModelTester(self )
_a : Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase , emb_dim=3_7 )
def __UpperCamelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) -> Optional[int]:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_UpperCAmelCase )
def __UpperCamelCase ( self ) -> str:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_UpperCAmelCase )
def __UpperCamelCase ( self ) -> Tuple:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_UpperCAmelCase )
def __UpperCamelCase ( self ) -> Any:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_UpperCAmelCase )
def __UpperCamelCase ( self ) -> Optional[Any]:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_UpperCAmelCase )
def __UpperCamelCase ( self ) -> Union[str, Any]:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_UpperCAmelCase )
def __UpperCamelCase ( self ) -> Optional[Any]:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_UpperCAmelCase )
@slow
def __UpperCamelCase ( self ) -> List[Any]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = FlaubertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def __UpperCamelCase ( self ) -> int:
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_a : Tuple = True
_a : int = model_class(config=_UpperCAmelCase )
_a : int = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
_a : List[Any] = torch.jit.trace(
_UpperCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'traced_model.pt' ) )
_a : Union[str, Any] = torch.jit.load(os.path.join(_UpperCAmelCase , 'traced_model.pt' ) , map_location=_UpperCAmelCase )
loaded(inputs_dict['input_ids'].to(_UpperCAmelCase ) , inputs_dict['attention_mask'].to(_UpperCAmelCase ) )
@require_torch
class a ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self ) -> Dict:
_a : int = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
_a : List[Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
_a : Optional[Any] = model(_UpperCAmelCase )[0]
_a : int = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , _UpperCAmelCase )
_a : Dict = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 120 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict="last" , _UpperCAmelCase : Any=None , _UpperCAmelCase : int=None , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_lengths
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = vocab_size
lowercase__ = n_special
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = summary_type
lowercase__ = use_proj
lowercase__ = scope
def lowerCamelCase__ (self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_input_lengths:
lowercase__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((lowercase__) , ) = result_with_labels.to_tuple()
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((lowercase__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = FlaubertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , ) -> str:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = FlaubertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
A__ = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__ (self : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def lowerCamelCase__ (self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = FlaubertModelTester(self )
lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , emb_dim=37 )
def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_UpperCAmelCase )
@slow
def lowerCamelCase__ (self : int ) -> List[Any]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = FlaubertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def lowerCamelCase__ (self : Dict ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ = torch.jit.trace(
_UpperCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """traced_model.pt""" ) )
lowercase__ = torch.jit.load(os.path.join(_UpperCAmelCase , """traced_model.pt""" ) , map_location=_UpperCAmelCase )
loaded(inputs_dict["""input_ids"""].to(_UpperCAmelCase ) , inputs_dict["""attention_mask"""].to(_UpperCAmelCase ) )
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase__ (self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ = model(_UpperCAmelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__ = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 15 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase ( __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
lowercase__ = emb.weight.data
return lin_layer
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str="facebook/mbart-large-en-ro" , __magic_name__ : Any=False , __magic_name__ : int=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = torch.load(__magic_name__ , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(__magic_name__ )
lowercase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
lowercase__ = MBartConfig.from_pretrained(__magic_name__ , vocab_size=__magic_name__ )
if mbart_aa and finetuned:
lowercase__ = """relu"""
lowercase__ = state_dict["""decoder.embed_tokens.weight"""]
lowercase__ = MBartForConditionalGeneration(__magic_name__ )
model.model.load_state_dict(__magic_name__ )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
A : Any = parser.parse_args()
A : str = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 15 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
A : Dict = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ):
SCREAMING_SNAKE_CASE_ = XLNetConfig.from_json_file(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
SCREAMING_SNAKE_CASE_ = finetuning_task
SCREAMING_SNAKE_CASE_ = GLUE_TASKS_NUM_LABELS[finetuning_task]
SCREAMING_SNAKE_CASE_ = XLNetForSequenceClassification(__UpperCamelCase )
elif "squad" in finetuning_task:
SCREAMING_SNAKE_CASE_ = finetuning_task
SCREAMING_SNAKE_CASE_ = XLNetForQuestionAnswering(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = XLNetLMHeadModel(__UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
print(F'''Save PyTorch model to {os.path.abspath(__UpperCamelCase )}''' )
torch.save(model.state_dict() , __UpperCamelCase )
print(F'''Save configuration file to {os.path.abspath(__UpperCamelCase )}''' )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
A : Any = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 140 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase ( __magic_name__ : Any ) -> int:
"""simple docstring"""
lowercase__ = model.config
lowercase__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase__ = MBartConfig(
is_decoder=__magic_name__ , is_encoder_decoder=__magic_name__ , add_cross_attention=__magic_name__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__magic_name__ , add_final_layer_norm=__magic_name__ , )
return encoder_config, decoder_config
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if "encoder.model" in name:
lowercase__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowercase__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowercase__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowercase__ = """encoder.""" + name
if "attn.proj" in name:
lowercase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowercase__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowercase__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowercase__ = """encoder.layernorm.bias"""
return name
def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : str ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
lowercase__ = key.split(""".""" )
lowercase__ = int(key_split[3] )
lowercase__ = int(key_split[5] )
lowercase__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[:dim]
lowercase__ = val[dim : dim * 2]
lowercase__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase__ = val
return orig_state_dict
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : List[Any]=None , __magic_name__ : Dict=False ) -> int:
"""simple docstring"""
lowercase__ = DonutModel.from_pretrained(__magic_name__ ).eval()
# load HuggingFace model
lowercase__ , lowercase__ = get_configs(__magic_name__ )
lowercase__ = DonutSwinModel(__magic_name__ )
lowercase__ = MBartForCausalLM(__magic_name__ )
lowercase__ = VisionEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
model.eval()
lowercase__ = original_model.state_dict()
lowercase__ = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# verify results on scanned document
lowercase__ = load_dataset("""hf-internal-testing/example-documents""" )
lowercase__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowercase__ = XLMRobertaTokenizerFast.from_pretrained(__magic_name__ , from_slow=__magic_name__ )
lowercase__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase__ = DonutProcessor(__magic_name__ , __magic_name__ )
lowercase__ = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowercase__ = """When is the coffee break?"""
lowercase__ = task_prompt.replace("""{user_input}""" , __magic_name__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowercase__ = original_model.decoder.tokenizer(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors="""pt""" )[
"""input_ids"""
]
lowercase__ = original_model.encoder.model.patch_embed(__magic_name__ )
lowercase__ , lowercase__ = model.encoder.embeddings(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
# verify encoder hidden states
lowercase__ = original_model.encoder(__magic_name__ )
lowercase__ = model.encoder(__magic_name__ ).last_hidden_state
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-2 )
# verify decoder hidden states
lowercase__ = original_model(__magic_name__ , __magic_name__ , __magic_name__ ).logits
lowercase__ = model(__magic_name__ , decoder_input_ids=__magic_name__ ).logits
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
A : Optional[int] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 15 | 0 |
'''simple docstring'''
import socket
def __snake_case ( ):
__UpperCAmelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
__UpperCAmelCase = socket.gethostname()
__UpperCAmelCase = 1_2312
sock.connect((host, port) )
sock.send(B'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
__UpperCAmelCase = sock.recv(1024 )
if not data:
break
out_file.write(lowerCAmelCase )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
| 396 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 15 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
__A = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: int , _lowerCamelCase: Optional[int] , _lowerCamelCase: str , _lowerCamelCase: Any ) -> int:
'''simple docstring'''
for attribute in key.split("." ):
__lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
__lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
__lowerCamelCase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCamelCase : Union[str, Any] = value
elif weight_type == "weight_v":
__lowerCamelCase : List[Any] = value
elif weight_type == "bias":
__lowerCamelCase : Dict = value
else:
__lowerCamelCase : Tuple = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase : Any = []
__lowerCamelCase : List[Any] = fairseq_model.state_dict()
__lowerCamelCase : Any = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
__lowerCamelCase : int = True
else:
for key, mapped_key in MAPPING.items():
__lowerCamelCase : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCamelCase : List[str] = True
if "*" in mapped_key:
__lowerCamelCase : List[str] = name.split(_lowerCamelCase )[0].split("." )[-2]
__lowerCamelCase : Union[str, Any] = mapped_key.replace("*" , _lowerCamelCase )
if "weight_g" in name:
__lowerCamelCase : str = "weight_g"
elif "weight_v" in name:
__lowerCamelCase : List[Any] = "weight_v"
elif "bias" in name:
__lowerCamelCase : List[str] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase : Any = "weight"
else:
__lowerCamelCase : Optional[int] = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowercase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: Any , _lowerCamelCase: str , _lowerCamelCase: Any , _lowerCamelCase: List[str] ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : Tuple = full_name.split("conv_layers." )[-1]
__lowerCamelCase : int = name.split("." )
__lowerCamelCase : int = int(items[0] )
__lowerCamelCase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase : Optional[int] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowercase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: Tuple , _lowerCamelCase: Any=None , _lowerCamelCase: Optional[int]=None , _lowerCamelCase: Optional[Any]=True ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
__lowerCamelCase : Optional[int] = UniSpeechSatConfig.from_pretrained(_lowerCamelCase )
else:
__lowerCamelCase : Any = UniSpeechSatConfig()
__lowerCamelCase : Tuple = ""
if is_finetuned:
__lowerCamelCase : Union[str, Any] = UniSpeechSatForCTC(_lowerCamelCase )
else:
__lowerCamelCase : int = UniSpeechSatForPreTraining(_lowerCamelCase )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
__lowerCamelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__A = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 646 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Any , _UpperCAmelCase : NestedDataStructureLike[PathLike] , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Optional[Features] = None , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ) -> List[str]:
"""simple docstring"""
super().__init__(
_UpperCAmelCase , split=_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase , data_files=_UpperCAmelCase , features=_UpperCAmelCase , **_UpperCAmelCase , )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , )
lowercase__ = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
| 15 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any=False ):
'''simple docstring'''
snake_case_ : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
snake_case_ : List[Any] = """segformer.encoder.""" + key
if key.startswith("""backbone""" ):
snake_case_ : Union[str, Any] = key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
snake_case_ : Any = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
snake_case_ : Dict = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(lowerCamelCase_ )-1}''' )
if "norm" in key:
snake_case_ : str = key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
snake_case_ : Union[str, Any] = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
snake_case_ : Optional[Any] = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(lowerCamelCase_ )-1}''' )
if "layer_norm1" in key:
snake_case_ : str = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
snake_case_ : str = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
snake_case_ : Dict = key[key.find("""block""" ) + len("""block""" )]
snake_case_ : List[Any] = key.replace(F'''block{idx}''' , F'''block.{int(lowerCamelCase_ )-1}''' )
if "attn.q" in key:
snake_case_ : Union[str, Any] = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
snake_case_ : Optional[Any] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
snake_case_ : Optional[Any] = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
snake_case_ : List[Any] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
snake_case_ : int = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
snake_case_ : List[Any] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
snake_case_ : Optional[Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
snake_case_ : Optional[Any] = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
snake_case_ : Optional[int] = key[key.find("""linear_c""" ) + len("""linear_c""" )]
snake_case_ : int = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(lowerCamelCase_ )-1}''' )
if key.startswith("""head""" ):
snake_case_ : List[str] = key.replace("""head""" , """classifier""" )
snake_case_ : Optional[int] = value
return new_state_dict
def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
snake_case_ : Tuple = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
snake_case_ : int = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
snake_case_ : Union[str, Any] = kv_weight[
: config.hidden_sizes[i], :
]
snake_case_ : Optional[int] = kv_bias[: config.hidden_sizes[i]]
snake_case_ : List[str] = kv_weight[
config.hidden_sizes[i] :, :
]
snake_case_ : List[str] = kv_bias[
config.hidden_sizes[i] :
]
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : Optional[int] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return image
@torch.no_grad()
def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = SegformerConfig()
snake_case_ : Dict = False
# set attributes based on model_name
snake_case_ : Optional[Any] = """huggingface/label-files"""
if "segformer" in model_name:
snake_case_ : Tuple = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
snake_case_ : Optional[int] = 1_50
snake_case_ : Optional[int] = """ade20k-id2label.json"""
snake_case_ : int = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
snake_case_ : int = 19
snake_case_ : Optional[int] = """cityscapes-id2label.json"""
snake_case_ : str = (1, 19, 1_28, 1_28)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
snake_case_ : Optional[Any] = True
snake_case_ : Optional[int] = model_name[4:6]
snake_case_ : int = 10_00
snake_case_ : str = """imagenet-1k-id2label.json"""
snake_case_ : Tuple = (1, 10_00)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
snake_case_ : int = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ : List[str] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
snake_case_ : List[Any] = idalabel
snake_case_ : int = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
snake_case_ : Tuple = [64, 1_28, 3_20, 5_12]
snake_case_ : List[str] = 2_56
elif size == "b2":
snake_case_ : Any = [64, 1_28, 3_20, 5_12]
snake_case_ : Optional[int] = 7_68
snake_case_ : Optional[int] = [3, 4, 6, 3]
elif size == "b3":
snake_case_ : int = [64, 1_28, 3_20, 5_12]
snake_case_ : Any = 7_68
snake_case_ : str = [3, 4, 18, 3]
elif size == "b4":
snake_case_ : List[str] = [64, 1_28, 3_20, 5_12]
snake_case_ : List[str] = 7_68
snake_case_ : List[str] = [3, 8, 27, 3]
elif size == "b5":
snake_case_ : Optional[Any] = [64, 1_28, 3_20, 5_12]
snake_case_ : Optional[Any] = 7_68
snake_case_ : Union[str, Any] = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
snake_case_ : List[str] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=lowerCamelCase_ , align=lowerCamelCase_ , do_random_crop=lowerCamelCase_ )
# prepare image
snake_case_ : str = prepare_img()
snake_case_ : List[Any] = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
snake_case_ : Optional[Any] = torch.load(lowerCamelCase_ , map_location=torch.device("""cpu""" ) )
else:
snake_case_ : Union[str, Any] = torch.load(lowerCamelCase_ , map_location=torch.device("""cpu""" ) )["""state_dict"""]
# rename keys
snake_case_ : str = rename_keys(lowerCamelCase_ , encoder_only=lowerCamelCase_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase_ , lowerCamelCase_ )
# create HuggingFace model and load state dict
if encoder_only:
snake_case_ : Tuple = False
snake_case_ : str = SegformerForImageClassification(lowerCamelCase_ )
else:
snake_case_ : Optional[Any] = SegformerForSemanticSegmentation(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
# forward pass
snake_case_ : int = model(lowerCamelCase_ )
snake_case_ : Optional[int] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
snake_case_ : Optional[Any] = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
snake_case_ : int = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.35_29, -10.03_04], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
snake_case_ : Any = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
snake_case_ : Tuple = torch.tensor(
[
[[-9.0_878, -10.20_81, -10.18_91], [-9.3_144, -10.79_41, -10.98_43], [-9.2_294, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
snake_case_ : Dict = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
snake_case_ : Optional[int] = torch.tensor(
[
[[-9.5_524, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.5_842, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
snake_case_ : Dict = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
snake_case_ : Optional[Any] = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.17_17], [-9.4_438, -10.90_58, -11.40_47], [-9.7_939, -12.34_95, -12.10_79]],
[[-7.1_514, -9.5_336, -10.08_60], [-9.7_776, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
snake_case_ : Optional[Any] = torch.tensor(
[
[
[-1.13_72E01, -1.27_87E01, -1.34_77E01],
[-1.25_36E01, -1.41_94E01, -1.44_09E01],
[-1.32_17E01, -1.48_88E01, -1.53_27E01],
],
[
[-1.47_91E01, -1.71_22E01, -1.82_77E01],
[-1.71_63E01, -1.91_92E01, -1.95_33E01],
[-1.78_97E01, -1.99_91E01, -2.03_15E01],
],
[
[7.67_23E-01, 4.19_21E-01, -7.78_78E-02],
[4.77_72E-01, 9.55_57E-03, -2.80_82E-01],
[3.60_32E-01, -2.48_26E-01, -5.11_68E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
snake_case_ : Union[str, Any] = torch.tensor(
[
[[-9.4_959, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.8_905, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
snake_case_ : Any = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
snake_case_ : int = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
snake_case_ : str = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
snake_case_ : int = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
snake_case_ : Union[str, Any] = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
snake_case_ : Optional[Any] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase_ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A : int = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 334 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = """ylacombe/bark-small"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = """en_speaker_1"""
lowercase__ = """This is a test string"""
lowercase__ = """speaker_embeddings_path.json"""
lowercase__ = """speaker_embeddings"""
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase__ (self : str ) -> Tuple:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase__ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase__ = 35
lowercase__ = 2
lowercase__ = 8
lowercase__ = {
"""semantic_prompt""": np.ones(_UpperCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase__ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
lowercase__ = processor(text=self.input_string )
lowercase__ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 15 | 0 |
'''simple docstring'''
import requests
A = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def UpperCAmelCase ( UpperCAmelCase__ : str):
lowerCamelCase : int = requests.get(_NEWS_API + bbc_news_api_key).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1):
print(F'''{i}.) {article['title']}''')
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 320 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
A : Optional[Any] = logging.get_logger(__name__)
A : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Optional[int] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : int = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
A : Optional[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
A : Any = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
A : Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A : Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRContextEncoderTokenizer
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRQuestionEncoderTokenizer
A : Any = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A : Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A : int = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(UpperCAmelCase__ )
class A :
'''simple docstring'''
def __call__(self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[bool] = None , **_UpperCAmelCase : Any , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
elif titles is None or texts is None:
lowercase__ = titles if texts is None else texts
return super().__call__(
_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = titles if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [titles]
lowercase__ = texts if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [texts]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = questions if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [questions] * n_passages
assert len(_UpperCAmelCase ) == len(
_UpperCAmelCase ), f'''There should be as many titles than texts but got {len(_UpperCAmelCase )} titles and {len(_UpperCAmelCase )} texts.'''
lowercase__ = super().__call__(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = super().__call__(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_UpperCAmelCase , _UpperCAmelCase )
]
}
if return_attention_mask is not False:
lowercase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ = attention_mask
return self.pad(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : BatchEncoding , _UpperCAmelCase : DPRReaderOutput , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = reader_input["""input_ids"""]
lowercase__ , lowercase__ , lowercase__ = reader_output[:3]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = sorted(range(_UpperCAmelCase ) , reverse=_UpperCAmelCase , key=relevance_logits.__getitem__ )
lowercase__ = []
for doc_id in sorted_docs:
lowercase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ = sequence_ids.index(self.pad_token_id )
else:
lowercase__ = len(_UpperCAmelCase )
lowercase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCAmelCase , top_spans=_UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCAmelCase , start_index=_UpperCAmelCase , end_index=_UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : int , _UpperCAmelCase : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = []
for start_index, start_score in enumerate(_UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase__ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
lowercase__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowercase__ = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class A ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = READER_PRETRAINED_VOCAB_FILES_MAP
A__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = READER_PRETRAINED_INIT_CONFIGURATION
A__ = ['''input_ids''', '''attention_mask''']
A__ = DPRReaderTokenizer
| 15 | 0 |
from __future__ import annotations
def a ( A__ : list[float] , A__ : list[float] ) -> float:
"""simple docstring"""
_lowercase =sorted(numsa + numsa )
_lowercase , _lowercase =divmod(len(A__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = [float(x) for x in input('Enter the elements of first array: ').split()]
lowercase_ = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
| 291 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[int] ) -> list[int]: # This function is recursive
"""simple docstring"""
lowercase__ = len(__magic_name__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ = array[0]
lowercase__ = False
lowercase__ = 1
lowercase__ = []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ = True
lowercase__ = [element for element in array[i:] if element >= array[i]]
lowercase__ = longest_subsequence(__magic_name__ )
if len(__magic_name__ ) > len(__magic_name__ ):
lowercase__ = temp_array
else:
i += 1
lowercase__ = [element for element in array[1:] if element >= pivot]
lowercase__ = [pivot, *longest_subsequence(__magic_name__ )]
if len(__magic_name__ ) > len(__magic_name__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__lowerCAmelCase = 'base_with_context'
def __UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : Tuple ):
"""simple docstring"""
a_ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
a_ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase_ )
for lyr_num, lyr in enumerate(model.encoders ):
a_ = weights[F'layers_{lyr_num}']
a_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
a_ = ly_weight['attention']
a_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def __UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : str ):
"""simple docstring"""
a_ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
a_ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase_ )
for lyr_num, lyr in enumerate(model.encoders ):
a_ = weights[F'layers_{lyr_num}']
a_ = ly_weight['attention']
a_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
a_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
a_ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def __UpperCamelCase ( lowercase_ : Optional[Any] , lowercase_ : Tuple ):
"""simple docstring"""
a_ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
a_ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase_ )
a_ = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
a_ = weights[F'layers_{lyr_num}']
a_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
a_ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
a_ = ly_weight['self_attention']
a_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
a_ = ly_weight['MultiHeadDotProductAttention_0']
a_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
a_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
a_ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
a_ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
a_ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def __UpperCamelCase ( lowercase_ : int ):
"""simple docstring"""
a_ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
a_ = jnp.tree_util.tree_map(onp.array , lowercase_ )
a_ = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
a_ = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
a_ = inference.parse_training_gin_file(lowercase_ , lowercase_ )
a_ = inference.InferenceModel(args.checkpoint_path , lowercase_ )
a_ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
a_ = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
a_ = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
a_ = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
a_ = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , lowercase_ )
a_ = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , lowercase_ )
a_ = load_decoder(ta_checkpoint['target']['decoder'] , lowercase_ )
a_ = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
a_ = SpectrogramDiffusionPipeline(
notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
__lowerCAmelCase = parser.parse_args()
main(args)
| 536 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A : List[Any] = None
A : Optional[Any] = logging.get_logger(__name__)
A : str = '▁'
A : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
A : Optional[int] = {
'google/pegasus-xsum': 5_1_2,
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = PegasusTokenizer
A__ = ['''input_ids''', '''attention_mask''']
def __init__(self : Tuple , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Union[str, Any]="<pad>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : Optional[Any]="<mask_2>" , _UpperCAmelCase : Optional[Any]="<mask_1>" , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Dict=103 , **_UpperCAmelCase : Dict , ) -> Dict:
"""simple docstring"""
lowercase__ = offset
if additional_special_tokens is not None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_UpperCAmelCase )}, but is'''
f''' {type(_UpperCAmelCase )}''' )
lowercase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_UpperCAmelCase ) , self.offset - 1 )
]
if len(set(_UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , pad_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , mask_token_sent=_UpperCAmelCase , offset=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List , _UpperCAmelCase : Optional[List] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(_UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase__ (self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ (self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
| 15 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase = None , lowercase = None , lowercase=None , lowercase=None ):
"""simple docstring"""
if not conversation_id:
A_ : int = uuid.uuida()
if past_user_inputs is None:
A_ : str = []
if generated_responses is None:
A_ : str = []
A_ : str = conversation_id
A_ : List[str] = past_user_inputs
A_ : int = generated_responses
A_ : Union[str, Any] = text
def __eq__( self , lowercase ):
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
A_ : Union[str, Any] = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
A_ : Optional[Any] = text
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
A_ : Dict = None
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
self.generated_responses.append(_UpperCAmelCase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
"""simple docstring"""
A_ : Union[str, Any] = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
A_ : Optional[Any] = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
UpperCAmelCase__ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
A_ : Tuple = self.tokenizer.eos_token
def lowerCAmelCase_ ( self , lowercase=None , lowercase=None , lowercase=None , **lowercase ):
"""simple docstring"""
A_ : Dict = {}
A_ : str = {}
A_ : Dict = {}
if min_length_for_response is not None:
A_ : List[str] = min_length_for_response
if minimum_tokens is not None:
A_ : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
A_ : Any = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
A_ : Optional[Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , lowercase , lowercase=0 , **lowercase ):
"""simple docstring"""
A_ : Tuple = super().__call__(_UpperCAmelCase , num_workers=_UpperCAmelCase , **_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase_ ( self , lowercase , lowercase=3_2 ):
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
A_ : Dict = self.tokenizer._build_conversation_input_ids(_UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
A_ : Dict = self._legacy_parse_and_tokenize(_UpperCAmelCase )
if self.framework == "pt":
A_ : Union[str, Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
A_ : str = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase_ ( self , lowercase , lowercase=1_0 , **lowercase ):
"""simple docstring"""
A_ : Optional[Any] = generate_kwargs.get('max_length' , self.model.config.max_length )
A_ : int = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
A_ : str = max_length - minimum_tokens
A_ : List[Any] = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
A_ : List[Any] = model_inputs['attention_mask'][:, -trim:]
A_ : str = model_inputs.pop('conversation' )
A_ : str = max_length
A_ : Optional[int] = self.model.generate(**_UpperCAmelCase , **_UpperCAmelCase )
if self.model.config.is_encoder_decoder:
A_ : Dict = 1
else:
A_ : Union[str, Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase_ ( self , lowercase , lowercase=True ):
"""simple docstring"""
A_ : Optional[Any] = model_outputs['output_ids']
A_ : Optional[int] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , )
A_ : Dict = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(_UpperCAmelCase )
return conversation
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = self.tokenizer.eos_token_id
A_ : Dict = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
if len(_UpperCAmelCase ) > self.tokenizer.model_max_length:
A_ : int = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 558 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A : List[str] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
A : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int = CHRF.CHAR_ORDER , _UpperCAmelCase : int = CHRF.WORD_ORDER , _UpperCAmelCase : int = CHRF.BETA , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , ) -> int:
"""simple docstring"""
lowercase__ = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowercase__ = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
lowercase__ = CHRF(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__ = sb_chrf.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 15 | 0 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
A_ = sys.version_info >= (3, 10)
def _UpperCamelCase ( A=None , A=None ):
return field(default_factory=lambda: default , metadata=A )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : Tuple = 42
__lowerCamelCase : Optional[Any] = 42
__lowerCamelCase : Optional[int] = 42
__lowerCamelCase : Optional[int] = 42
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : int = 42
__lowerCamelCase : List[str] = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : Any = False
__lowerCamelCase : Tuple = True
__lowerCamelCase : int = None
class __lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = "titi"
__lowerCamelCase : Any = "toto"
class __lowerCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = "titi"
__lowerCamelCase : str = "toto"
__lowerCamelCase : Optional[int] = 42
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : List[str] = "toto"
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =BasicEnum(self.foo )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : Tuple = "toto"
def UpperCamelCase__ ( self: List[str] ):
UpperCamelCase_ =MixedTypeEnum(self.foo )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : int = None
__lowerCamelCase : Optional[int] = field(default=UpperCAmelCase__ , metadata={"help": "help message"} )
__lowerCamelCase : Dict = None
__lowerCamelCase : List[Any] = list_field(default=[] )
__lowerCamelCase : Dict = list_field(default=[] )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : Optional[Any] = list_field(default=[] )
__lowerCamelCase : Union[str, Any] = list_field(default=[1, 2, 3] )
__lowerCamelCase : int = list_field(default=["Hallo", "Bonjour", "Hello"] )
__lowerCamelCase : List[Any] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : Dict = field()
__lowerCamelCase : Union[str, Any] = field()
__lowerCamelCase : Union[str, Any] = field()
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =BasicEnum(self.required_enum )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : Optional[int] = 42
__lowerCamelCase : List[str] = field()
__lowerCamelCase : str = None
__lowerCamelCase : int = field(default="toto" , metadata={"help": "help message"} )
__lowerCamelCase : Union[str, Any] = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : List[Any] = False
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Optional[int] = None
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : Dict = None
__lowerCamelCase : str = field(default=UpperCAmelCase__ , metadata={"help": "help message"} )
__lowerCamelCase : Dict = None
__lowerCamelCase : Union[str, Any] = list_field(default=[] )
__lowerCamelCase : Tuple = list_field(default=[] )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self: Any , UpperCamelCase_: argparse.ArgumentParser , UpperCamelCase_: argparse.ArgumentParser ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCamelCase_ ={k: v for k, v in vars(_UpperCAmelCase ).items() if k != "container"}
UpperCamelCase_ ={k: v for k, v in vars(_UpperCAmelCase ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , _UpperCAmelCase ) and yy.get("choices" , _UpperCAmelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](_UpperCAmelCase ) , yy["type"](_UpperCAmelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ =argparse.ArgumentParser()
expected.add_argument("--foo" , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument("--bar" , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument("--baz" , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument("--flag" , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs="?" )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ =["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((UpperCamelCase_ ) , ) =parser.parse_args_into_dataclasses(_UpperCAmelCase , look_for_args_file=_UpperCAmelCase )
self.assertFalse(example.flag )
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ =argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=_UpperCAmelCase )
expected.add_argument("--baz" , default="toto" , type=_UpperCAmelCase , help="help message" )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =argparse.ArgumentParser()
expected.add_argument("--foo" , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs="?" )
expected.add_argument("--baz" , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=_UpperCAmelCase , dest="baz" )
expected.add_argument("--opt" , type=_UpperCAmelCase , default=_UpperCAmelCase )
UpperCamelCase_ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase )
for dataclass_type in dataclass_types:
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ =parser.parse_args([] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
UpperCamelCase_ =parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
UpperCamelCase_ =parser.parse_args(["--foo", "--baz"] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
UpperCamelCase_ =parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
UpperCamelCase_ =parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ =argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ =parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase_ =parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCamelCase_ =parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase_ =parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCamelCase_ =parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
UpperCamelCase_ =parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCamelCase__ ( self: List[str] ):
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : List[Any] = "toto"
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ =argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ =parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase_ =parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase_ =parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ =argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=_UpperCAmelCase )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=_UpperCAmelCase )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=_UpperCAmelCase )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ =parser.parse_args([] )
self.assertEqual(
_UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCamelCase_ =parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(_UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =argparse.ArgumentParser()
expected.add_argument("--foo" , default=_UpperCAmelCase , type=_UpperCAmelCase )
expected.add_argument("--bar" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="help message" )
expected.add_argument("--baz" , default=_UpperCAmelCase , type=_UpperCAmelCase )
expected.add_argument("--ces" , nargs="+" , default=[] , type=_UpperCAmelCase )
expected.add_argument("--des" , nargs="+" , default=[] , type=_UpperCAmelCase )
UpperCamelCase_ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase )
for dataclass_type in dataclass_types:
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ =parser.parse_args([] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , bar=_UpperCAmelCase , baz=_UpperCAmelCase , ces=[] , des=[] ) )
UpperCamelCase_ =parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(_UpperCAmelCase , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ =argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument("--required_str" , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=_UpperCAmelCase , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ =argparse.ArgumentParser()
expected.add_argument("--foo" , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=_UpperCAmelCase , )
expected.add_argument("--opt" , type=_UpperCAmelCase , default=_UpperCAmelCase )
expected.add_argument("--baz" , default="toto" , type=_UpperCAmelCase , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ ={
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
UpperCamelCase_ =parser.parse_dict(_UpperCAmelCase )[0]
UpperCamelCase_ =BasicExample(**_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ ={
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(_UpperCAmelCase , parser.parse_dict , _UpperCAmelCase , allow_extra_keys=_UpperCAmelCase )
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ ={
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ =os.path.join(_UpperCAmelCase , "temp_json" )
os.mkdir(_UpperCAmelCase )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ =parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
UpperCamelCase_ =BasicExample(**_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
UpperCamelCase_ ={
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ =os.path.join(_UpperCAmelCase , "temp_yaml" )
os.mkdir(_UpperCAmelCase )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ =parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
UpperCamelCase_ =BasicExample(**_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =HfArgumentParser(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
| 391 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def lowerCamelCase__ (self : Tuple , **_UpperCAmelCase : int ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = """</s>"""
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_UpperCAmelCase ) , 1103 )
def lowerCamelCase__ (self : str ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowercase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowercase__ = """To ensure a smooth flow of bank resolutions."""
lowercase__ = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase__ (self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase , offset=0 , mask_token_sent=_UpperCAmelCase , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Any ) -> str:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Union[str, Any] ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_torch
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowercase__ = self._large_tokenizer(_UpperCAmelCase ).input_ids
self.assertListEqual(
_UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 15 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_a : Optional[Any] = flax_key_tuple[:-1] + ('weight',)
_a : Optional[int] = torch.permute(A , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(A ):
# linear layer
_a : Tuple = flax_key_tuple[:-1] + ('weight',)
_a : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_a : List[Any] = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def UpperCAmelCase_ ( A , A , A ):
'''simple docstring'''
if "metadata" in layer:
_a : Tuple = layer.split('metadata' )
_a : Union[str, Any] = ''.join(split_layer[0] )[:-1]
_a : Dict = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
_a : Any = layer.split('kvstore' )
_a : str = ''.join(split_layer[0] )[:-1]
_a : Optional[int] = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
_a : Optional[int] = layer.split('/' )
_a : Optional[Any] = '/'.join(split_layer[:-1] )
_a : int = (split_layer[-1],)
if "kvstore/path" in layer:
_a : List[str] = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
_a : Dict = 'file'
else:
_a : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : Any = rename_keys(A )
_a : List[Any] = {}
for k, v in current_block.items():
_a : str = v
_a : Optional[int] = new_current_block
torch.save(A , A )
def UpperCAmelCase_ ( A , A , A , A , A = WEIGHTS_NAME ):
'''simple docstring'''
_a : str = convert_file_size_to_int(A )
_a : Dict = []
_a : Optional[Any] = {}
_a : Tuple = 0
_a : Union[str, Any] = 0
os.makedirs(A , exist_ok=A )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
_a : List[Any] = serialization.msgpack_restore(fp.read() )['optimizer']['target']
_a : Tuple = flatten_dict(A , sep='/' )
_a : Tuple = {}
for layer in checkpoint_info.keys():
_a , _a , _a : int = get_key_and_tensorstore_dict(
A , A , A )
if curr_real_layer_name in all_layers:
_a : str = content
else:
_a : Optional[int] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_a : Tuple = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_a : Optional[Any] = torch.tensor(A )
_a : Any = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_a , _a : List[Any] = rename_base_flax_keys(tuple(key.split('/' ) ) , A )
_a : Dict = '/'.join(A )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_a : int = os.path.join(
A , weights_name.replace('.bin' , f'''-{len(A )+1:05d}-of-???.bin''' ) )
rename_and_save_block(A , A )
sharded_state_dicts.append(current_block.keys() )
del current_block
_a : Any = {}
_a : Dict = 0
_a : List[str] = raw_weights.to(getattr(A , A ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_a : Union[str, Any] = os.path.join(A , weights_name.replace('.bin' , f'''-{len(A )+1:05d}-of-???.bin''' ) )
rename_and_save_block(A , A )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(A ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_a : str = {}
_a : List[Any] = {}
for idx, shard in enumerate(A ):
_a : Tuple = weights_name.replace(
'.bin' , f'''-{idx+1:05d}-of-{len(A ):05d}.bin''' ) # len(sharded_state_dicts):05d}
_a : str = os.path.join(A , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(A , os.path.join(A , A ) )
_a : Optional[Any] = shard
for key in shard:
_a : Any = shard_file
# Add the metadata
_a : Optional[int] = {'total_size': total_size}
_a : Optional[int] = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(A , A ) , 'w' , encoding='utf-8' ) as f:
_a : Any = json.dumps(A , indent=2 , sort_keys=A ) + '\n'
f.write(A )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase_ : Tuple = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCAmelCase_ ( ):
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_a : str = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
_a : Any = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
_a : int = TaTokenizer.from_pretrained('t5-small' )
_a : Union[str, Any] = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
_a : str = tokenizer(A , return_tensors='pt' ).input_ids
_a : str = model.generate(A , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 120 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase__ = load_file(__magic_name__ )
lowercase__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.text_encoder
else:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.unet
# find the target layer
lowercase__ = layer_infos.pop(0 )
while len(__magic_name__ ) > -1:
try:
lowercase__ = curr_layer.__getattr__(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase__ = layer_infos.pop(0 )
elif len(__magic_name__ ) == 0:
break
except Exception:
if len(__magic_name__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase__ = layer_infos.pop(0 )
lowercase__ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(__magic_name__ )
else:
pair_keys.append(__magic_name__ )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase__ = state_dict[pair_keys[0]].to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ )
# update visited list
for item in pair_keys:
visited.append(__magic_name__ )
return pipeline
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
A : str = parser.parse_args()
A : Tuple = args.base_model_path
A : List[str] = args.checkpoint_path
A : Optional[int] = args.dump_path
A : Optional[int] = args.lora_prefix_unet
A : Any = args.lora_prefix_text_encoder
A : Any = args.alpha
A : List[str] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
A : int = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 15 | 0 |
"""simple docstring"""
def a ( __UpperCAmelCase : list , __UpperCAmelCase : list , __UpperCAmelCase : int ) -> int:
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError("""The length of profit and weight must be same.""" )
if max_weight <= 0:
raise ValueError("""max_weight must greater than zero.""" )
if any(p < 0 for p in profit ):
raise ValueError("""Profit can not be negative.""" )
if any(w < 0 for w in weight ):
raise ValueError("""Weight can not be negative.""" )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
__magic_name__: Tuple = [p / w for p, w in zip(__UpperCAmelCase , __UpperCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
__magic_name__: Tuple = sorted(__UpperCAmelCase )
# declaring useful variables
__magic_name__: Optional[Any] = len(__UpperCAmelCase )
__magic_name__: List[Any] = 0
__magic_name__: Dict = 0
__magic_name__: Any = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
__magic_name__: Tuple = sorted_profit_by_weight[length - i - 1]
__magic_name__: Optional[Any] = profit_by_weight.index(__UpperCAmelCase )
__magic_name__: List[str] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
__lowerCamelCase = [int(x) for x in input('Input profits separated by spaces: ').split()]
__lowerCamelCase = [int(x) for x in input('Input weights separated by spaces: ').split()]
__lowerCamelCase = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 96 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Tuple = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''ibert'''
def __init__(self : int , _UpperCAmelCase : Union[str, Any]=3_0522 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[Any]=3072 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=1E-1_2 , _UpperCAmelCase : int=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : int="absolute" , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]="none" , **_UpperCAmelCase : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = quant_mode
lowercase__ = force_dequant
class A ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def lowerCamelCase__ (self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 15 | 0 |
import os
import sys
A : Tuple = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
A : Dict = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def a__ ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoConfig.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def a__ ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoTokenizer.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModel.__doc__ )
def a__ ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModel.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def a__ ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModelForCausalLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def a__ ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModelForMaskedLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def a__ ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModelForSequenceClassification.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def a__ ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModelForQuestionAnswering.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
| 140 |
from math import log
from scipy.constants import Boltzmann, physical_constants
A : Any = 3_0_0 # TEMPERATURE (unit = K)
def UpperCamelCase ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_UpperCamelCase : Optional[Any] = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Any = 2_56
def __snake_case ( lowerCAmelCase : List[str] ):
if len(lowerCAmelCase ) < MIN_NUM_TOKENS:
return None
__UpperCAmelCase = MinHash(num_perm=lowerCAmelCase )
for token in set(lowerCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def __snake_case ( lowerCAmelCase : str ):
return {t for t in NON_ALPHA.split(lowerCAmelCase ) if len(t.strip() ) > 0}
class _lowercase:
"""simple docstring"""
def __init__( self: Optional[int] ,*,
a: float = 0.85 ,):
__UpperCAmelCase = duplication_jaccard_threshold
__UpperCAmelCase = NUM_PERM
__UpperCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm )
__UpperCAmelCase = defaultdict(_UpperCAmelCase )
def snake_case ( self: List[Any] ,a: Tuple ,a: MinHash ):
__UpperCAmelCase = self._index.query(_UpperCAmelCase )
if code_key in self._index.keys:
print(f"""Duplicate key {code_key}""" )
return
self._index.insert(_UpperCAmelCase ,_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_UpperCAmelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_UpperCAmelCase )
def snake_case ( self: List[Any] ):
__UpperCAmelCase = []
for base, duplicates in self._duplicate_clusters.items():
__UpperCAmelCase = [base] + list(_UpperCAmelCase )
# reformat the cluster to be a list of dict
__UpperCAmelCase = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(_UpperCAmelCase )
return duplicate_clusters
def snake_case ( self: int ,a: str ):
__UpperCAmelCase = self.get_duplicate_clusters()
with open(_UpperCAmelCase ,'w' ) as f:
json.dump(_UpperCAmelCase ,_UpperCAmelCase )
def __snake_case ( lowerCAmelCase : Any ):
__UpperCAmelCase , __UpperCAmelCase = element
__UpperCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def __snake_case ( lowerCAmelCase : Type[Dataset] ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCAmelCase , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def __snake_case ( lowerCAmelCase : Type[Dataset] , lowerCAmelCase : float ):
__UpperCAmelCase = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase ) ) , max_queue_size=100 ) ):
di.add(lowerCAmelCase , lowerCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def __snake_case ( lowerCAmelCase : str , lowerCAmelCase : str ):
__UpperCAmelCase = get_tokens(lowerCAmelCase )
__UpperCAmelCase = get_tokens(lowerCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_UpperCamelCase : Tuple = None
def __snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ):
__UpperCAmelCase = []
for elementa in cluster:
__UpperCAmelCase = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
__UpperCAmelCase = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(lowerCAmelCase , lowerCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__UpperCAmelCase = 1
extremes.append(lowerCAmelCase )
return extremes
def __snake_case ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] ):
global _shared_dataset
__UpperCAmelCase = dataset
__UpperCAmelCase = []
__UpperCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCAmelCase , lowerCAmelCase , ) , total=len(lowerCAmelCase ) , ):
extremes_list.append(lowerCAmelCase )
return extremes_list
def __snake_case ( lowerCAmelCase : Type[Dataset] , lowerCAmelCase : float = 0.85 ):
__UpperCAmelCase = make_duplicate_clusters(lowerCAmelCase , lowerCAmelCase )
__UpperCAmelCase = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
__UpperCAmelCase = {}
__UpperCAmelCase = find_extremes(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
__UpperCAmelCase = element
__UpperCAmelCase = duplicate_indices - set(extreme_dict.keys() )
__UpperCAmelCase = dataset.filter(lambda lowerCAmelCase , lowerCAmelCase : idx not in remove_indices , with_indices=lowerCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__UpperCAmelCase = element['base_index'] in extreme_dict
if element["is_extreme"]:
__UpperCAmelCase = extreme_dict[element['base_index']]['copies']
print(F"""Original dataset size: {len(lowerCAmelCase )}""" )
print(F"""Number of duplicate clusters: {len(lowerCAmelCase )}""" )
print(F"""Files in duplicate cluster: {len(lowerCAmelCase )}""" )
print(F"""Unique files in duplicate cluster: {len(lowerCAmelCase )}""" )
print(F"""Filtered dataset size: {len(lowerCAmelCase )}""" )
return ds_filter, duplicate_clusters
| 396 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A :
'''simple docstring'''
A__ = 42
A__ = None
A__ = None
def UpperCamelCase ( ) -> Node | None:
"""simple docstring"""
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(__magic_name__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__magic_name__ , __magic_name__ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(__magic_name__ , __magic_name__ ) )
lowercase__ = 0
return output
def UpperCamelCase ( ) -> None: # Main function for testing.
"""simple docstring"""
lowercase__ = make_tree()
print(f'''In-order Traversal: {inorder(__magic_name__ )}''' )
print(f'''Pre-order Traversal: {preorder(__magic_name__ )}''' )
print(f'''Post-order Traversal: {postorder(__magic_name__ )}''' , """\n""" )
print(f'''Height of Tree: {height(__magic_name__ )}''' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(__magic_name__ ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(__magic_name__ ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(__magic_name__ , level=__magic_name__ ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 15 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__A = False
class _snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowerCamelCase : List[Any] = "A painting of a squirrel eating a burger "
__lowerCamelCase : Optional[Any] = torch.manual_seed(0 )
__lowerCamelCase : List[str] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
__lowerCamelCase : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowerCamelCase : str = generator.manual_seed(0 )
__lowerCamelCase : List[Any] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = "A painting of a squirrel eating a burger "
__lowerCamelCase : Optional[Any] = torch.manual_seed(0 )
__lowerCamelCase : Dict = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__lowerCamelCase : Optional[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 646 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Tuple = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''poolformer'''
def __init__(self : Dict , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : str=16 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Union[str, Any]=4.0 , _UpperCAmelCase : str=[2, 2, 6, 2] , _UpperCAmelCase : int=[64, 128, 320, 512] , _UpperCAmelCase : Union[str, Any]=[7, 3, 3, 3] , _UpperCAmelCase : List[Any]=[4, 2, 2, 2] , _UpperCAmelCase : Union[str, Any]=[2, 1, 1, 1] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=1E-5 , _UpperCAmelCase : Tuple=0.02 , **_UpperCAmelCase : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = stride
lowercase__ = padding
lowercase__ = pool_size
lowercase__ = hidden_sizes
lowercase__ = mlp_ratio
lowercase__ = depths
lowercase__ = patch_sizes
lowercase__ = strides
lowercase__ = num_encoder_blocks
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_layer_scale
lowercase__ = layer_scale_init_value
lowercase__ = initializer_range
super().__init__(**_UpperCAmelCase )
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = version.parse('''1.11''' )
@property
def lowerCamelCase__ (self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ (self : Dict ) -> float:
"""simple docstring"""
return 2E-3
| 15 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__A : List[Any] = None
__A : Optional[Any] = logging.get_logger(__name__)
__A : str = '▁'
__A : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__A : Any = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
__A : Optional[int] = {
'google/pegasus-xsum': 512,
}
class __UpperCamelCase ( UpperCAmelCase__ ):
lowercase : Dict = VOCAB_FILES_NAMES
lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Tuple = PegasusTokenizer
lowercase : List[str] = ['input_ids', 'attention_mask']
def __init__( self :Tuple ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Union[str, Any]="<pad>" ,_UpperCamelCase :int="</s>" ,_UpperCamelCase :int="<unk>" ,_UpperCamelCase :Optional[Any]="<mask_2>" ,_UpperCamelCase :Optional[Any]="<mask_1>" ,_UpperCamelCase :Optional[Any]=None ,_UpperCamelCase :Dict=1_0_3 ,**_UpperCamelCase :Dict ,):
snake_case_ : List[str] = offset
if additional_special_tokens is not None:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
raise TypeError(
F'''additional_special_tokens should be of type {type(_UpperCAmelCase )}, but is'''
F''' {type(_UpperCAmelCase )}''' )
snake_case_ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(_UpperCAmelCase ) ,self.offset - 1 )
]
if len(set(_UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case_ : List[str] = additional_special_tokens_extended
else:
snake_case_ : Any = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 ,self.offset )]
super().__init__(
_UpperCAmelCase ,tokenizer_file=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,mask_token=_UpperCAmelCase ,mask_token_sent=_UpperCAmelCase ,offset=_UpperCAmelCase ,additional_special_tokens=_UpperCAmelCase ,**_UpperCAmelCase ,)
snake_case_ : Dict = vocab_file
snake_case_ : str = False if not self.vocab_file else True
def a__ ( self :Optional[Any] ,_UpperCamelCase :Tuple ):
snake_case_ : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def a__ ( self :Optional[Any] ,_UpperCamelCase :List ,_UpperCamelCase :Optional[List] = None ,_UpperCamelCase :bool = False ):
if already_has_special_tokens:
return self._special_token_mask(_UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(_UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def a__ ( self :int ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def a__ ( self :str ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : List[Any] = os.path.join(
_UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file ,_UpperCAmelCase )
return (out_vocab_file,) | 334 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class A ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
@require_torch
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
lowercase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowercase__ = bertabert.config.encoder.vocab_size
lowercase__ = tokenizer.sep_token_id
lowercase__ = tokenizer.cls_token_id
lowercase__ = 128
lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
lowercase__ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
lowercase__ = train_dataset.select(range(32 ) )
lowercase__ = val_dataset.select(range(16 ) )
lowercase__ = 4
def _map_to_encoder_decoder_inputs(_UpperCAmelCase : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_UpperCAmelCase , max_length=512 )
lowercase__ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_UpperCAmelCase , max_length=128 )
lowercase__ = inputs.input_ids
lowercase__ = inputs.attention_mask
lowercase__ = outputs.input_ids
lowercase__ = outputs.input_ids.copy()
lowercase__ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
lowercase__ = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_UpperCAmelCase : List[Any] ):
lowercase__ = pred.label_ids
lowercase__ = pred.predictions
# all unnecessary tokens are removed
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowercase__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
lowercase__ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
lowercase__ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy="""steps""" , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
| 15 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def UpperCAmelCase ( UpperCAmelCase__ : int = 8):
lowerCamelCase : Optional[Any] = ascii_letters + digits + punctuation
return "".join(secrets.choice(UpperCAmelCase__) for _ in range(UpperCAmelCase__))
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : int):
i -= len(UpperCAmelCase__)
lowerCamelCase : Optional[int] = i // 3
lowerCamelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowerCamelCase : Union[str, Any] = (
chars_incl
+ random(UpperCAmelCase__ , quotient + remainder)
+ random(UpperCAmelCase__ , UpperCAmelCase__)
+ random(UpperCAmelCase__ , UpperCAmelCase__)
)
lowerCamelCase : Any = list(UpperCAmelCase__)
shuffle(UpperCAmelCase__)
return "".join(UpperCAmelCase__)
# random is a generalised function for letters, characters and numbers
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : int):
return "".join(secrets.choice(UpperCAmelCase__) for _ in range(UpperCAmelCase__))
def UpperCAmelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any]):
pass # Put your code here...
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple):
pass # Put your code here...
def UpperCAmelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any]):
pass # Put your code here...
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : int = 8):
if len(UpperCAmelCase__) < min_length:
# Your Password must be at least 8 characters long
return False
lowerCamelCase : Dict = any(char in ascii_uppercase for char in password)
lowerCamelCase : Any = any(char in ascii_lowercase for char in password)
lowerCamelCase : Optional[int] = any(char in digits for char in password)
lowerCamelCase : Optional[int] = any(char in punctuation for char in password)
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def UpperCAmelCase ( ):
lowerCamelCase : Optional[int] = int(input('Please indicate the max length of your password: ').strip())
lowerCamelCase : Union[str, Any] = input(
'Please indicate the characters that must be in your password: ').strip()
print('Password generated:' , password_generator(UpperCAmelCase__))
print(
'Alternative Password generated:' , alternative_password_generator(UpperCAmelCase__ , UpperCAmelCase__) , )
print('[If you are thinking of using this passsword, You better save it.]')
if __name__ == "__main__":
main()
| 320 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : Union[str, Any] = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ['ConvNextFeatureExtractor']
A : Optional[Any] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
A : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 15 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
_a = RobertaTokenizer
_a = RobertaTokenizerFast
_a = True
_a = {"""cls_token""": """<s>"""}
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_lowercase =dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowercase =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowercase ={'unk_token': '<unk>'}
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def A__ ( self , **lowerCAmelCase ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def A__ ( self , **lowerCAmelCase ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def A__ ( self , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowercase ='lower newer'
_lowercase ='lower newer'
return input_text, output_text
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowercase ='lower newer'
_lowercase =['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowercase =tokenizer.tokenize(_UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowercase =tokens + [tokenizer.unk_token]
_lowercase =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
_lowercase =self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_UpperCAmelCase ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_UpperCAmelCase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =self.tokenizer_class.from_pretrained('roberta-base' )
_lowercase =tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
_lowercase =tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
_lowercase =tokenizer.encode(
'sequence builders' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
_lowercase =tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
_lowercase =tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
_lowercase =tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =self.get_tokenizer()
_lowercase ='Encode this sequence.'
_lowercase =tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
_lowercase =tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
_lowercase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowercase =tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
_lowercase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_lowercase =tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_lowercase =tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing spaces after special tokens
_lowercase ='<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )} ) # mask token has a left space
_lowercase =tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
_lowercase ='Encode <mask> sequence'
_lowercase ='Encode <mask>sequence'
_lowercase =tokenizer.encode(_UpperCAmelCase )
_lowercase =encoded.index(_UpperCAmelCase )
_lowercase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowercase =tokenizer.encode(_UpperCAmelCase )
_lowercase =encoded.index(_UpperCAmelCase )
_lowercase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase =self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
_lowercase =self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
_lowercase ='A, <mask> AllenNLP sentence.'
_lowercase =tokenizer_r.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
_lowercase =tokenizer_p.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_lowercase =tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowercase =tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
_UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def A__ ( self ) -> List[str]:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_lowercase =self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_lowercase =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowercase =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _UpperCAmelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , _UpperCAmelCase )
self.assertEqual(post_processor_state['trim_offsets'] , _UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase ='hello' # `hello` is a token in the vocabulary of `pretrained_name`
_lowercase =F'''{text_of_1_token} {text_of_1_token}'''
_lowercase =self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_lowercase =tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_lowercase =self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_lowercase =tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_lowercase =self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_lowercase =tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_lowercase =self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_lowercase =tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_lowercase =F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowercase =self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_lowercase =tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ) + 1, 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_lowercase =self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_lowercase =tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
_lowercase =self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
_lowercase =tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
| 291 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : List[str]=7 ) -> Dict:
"""simple docstring"""
lowercase__ = None
if token is not None:
lowercase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
lowercase__ = """636036"""
lowercase__ = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
lowercase__ = requests.get(__magic_name__ , headers=__magic_name__ ).json()
return result["workflow_runs"]
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
lowercase__ = get_daily_ci_runs(__magic_name__ )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run["""id"""]
break
return workflow_run_id
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
lowercase__ = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=__magic_name__ , token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ , artifact_url=__magic_name__ , output_dir=__magic_name__ , token=__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(__magic_name__ , f'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
lowercase__ = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
lowercase__ = f.read().decode("""UTF-8""" )
return results
| 15 | 0 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __SCREAMING_SNAKE_CASE (UpperCAmelCase__ ):
"""simple docstring"""
_a : str = ['''image_processor''', '''tokenizer''']
_a : int = '''AutoImageProcessor'''
_a : Dict = '''AutoTokenizer'''
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ):
"""simple docstring"""
a_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
a_ = kwargs.pop('feature_extractor' )
a_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
a_ = self.image_processor
a_ = False
def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase )
a_ = kwargs.pop('images' , _UpperCAmelCase )
a_ = kwargs.pop('text' , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
a_ = args[0]
a_ = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
a_ = self.image_processor(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
if text is not None:
a_ = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
a_ = encodings['input_ids']
return inputs
def _a ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def _a ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@contextmanager
def _a ( self ):
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
a_ = True
a_ = self.tokenizer
yield
a_ = self.image_processor
a_ = False
def _a ( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=None ):
"""simple docstring"""
if added_vocab is None:
a_ = self.tokenizer.get_added_vocab()
a_ = {}
while tokens:
a_ = re.search(R'<s_(.*?)>' , _UpperCAmelCase , re.IGNORECASE )
if start_token is None:
break
a_ = start_token.group(1 )
a_ = re.search(Rf'</s_{key}>' , _UpperCAmelCase , re.IGNORECASE )
a_ = start_token.group()
if end_token is None:
a_ = tokens.replace(_UpperCAmelCase , '' )
else:
a_ = end_token.group()
a_ = re.escape(_UpperCAmelCase )
a_ = re.escape(_UpperCAmelCase )
a_ = re.search(f'{start_token_escaped}(.*?){end_token_escaped}' , _UpperCAmelCase , re.IGNORECASE )
if content is not None:
a_ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
a_ = self.tokenajson(_UpperCAmelCase , is_inner_value=_UpperCAmelCase , added_vocab=_UpperCAmelCase )
if value:
if len(_UpperCAmelCase ) == 1:
a_ = value[0]
a_ = value
else: # leaf nodes
a_ = []
for leaf in content.split(R'<sep/>' ):
a_ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
a_ = leaf[1:-2] # for categorical special tokens
output[key].append(_UpperCAmelCase )
if len(output[key] ) == 1:
a_ = output[key][0]
a_ = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_UpperCAmelCase , added_vocab=_UpperCAmelCase )
if len(_UpperCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _a ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def _a ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
| 536 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A ( unittest.TestCase ):
'''simple docstring'''
A__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 )
lowercase__ = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
] , )
@require_torch
def lowerCamelCase__ (self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
lowercase__ = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
lowercase__ = pipeline(
"""video-classification""" , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase__ = video_classifier(_UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def lowerCamelCase__ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
| 15 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
lowerCamelCase_ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
lowerCamelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase_ = field(default=UpperCAmelCase__ , metadata={'''help''': '''whether to use adafactor'''} )
lowerCamelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
lowerCamelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
lowerCamelCase_ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
lowerCamelCase_ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
lowerCamelCase_ = field(
default='''linear''' , metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 558 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Optional[Any] = 1_6
A : str = 3_2
def UpperCamelCase ( __magic_name__ : Accelerator , __magic_name__ : int = 16 ) -> List[str]:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__magic_name__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__magic_name__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
__magic_name__ , padding="""longest""" , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
lowercase__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : Union[str, Any] = mocked_dataloaders # noqa: F811
def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __magic_name__ ) == "1":
lowercase__ = 2
# New Code #
lowercase__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__magic_name__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["""lr"""]
lowercase__ = int(config["""num_epochs"""] )
lowercase__ = int(config["""seed"""] )
lowercase__ = int(config["""batch_size"""] )
lowercase__ = evaluate.load("""glue""" , """mrpc""" )
set_seed(__magic_name__ )
lowercase__ , lowercase__ = get_dataloaders(__magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=__magic_name__ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__magic_name__ ):
lowercase__ = model(**__magic_name__ )
lowercase__ = output.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**__magic_name__ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __magic_name__ )
def UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__magic_name__ , default=__magic_name__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__magic_name__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase__ = parser.parse_args()
lowercase__ = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 15 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
A_ = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
A_ = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
A_ = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self: Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Tuple ):
UpperCamelCase_ ={prediction["id"]: prediction["prediction_text"] for prediction in predictions}
UpperCamelCase_ =[
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
UpperCamelCase_ =evaluate(dataset=_UpperCAmelCase , predictions=_UpperCAmelCase )
return score
| 391 |
import os
import sys
A : Tuple = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
A : Dict = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : List[Any] ) -> str:
"""simple docstring"""
return AutoConfig.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoModel.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase ( *__magic_name__ : Dict , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*__magic_name__ , **__magic_name__ )
| 15 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : Dict = LxmertTokenizer
__lowerCAmelCase : List[Any] = LxmertTokenizerFast
__lowerCAmelCase : Any = True
__lowerCAmelCase : Optional[int] = True
def __UpperCamelCase ( self ) -> List[Any]:
super().setUp()
_a : str = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Tuple:
_a : Dict = 'UNwant\u00E9d,running'
_a : Union[str, Any] = 'unwanted, running'
return input_text, output_text
def __UpperCamelCase ( self ) -> Union[str, Any]:
_a : List[str] = self.tokenizer_class(self.vocab_file )
_a : Any = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 1_0, 8, 9] )
def __UpperCamelCase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Tuple = self.get_tokenizer()
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_a : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_a : str = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_a : Tuple = self.get_rust_tokenizer()
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
| 120 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict="last" , _UpperCAmelCase : Any=None , _UpperCAmelCase : int=None , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_lengths
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = vocab_size
lowercase__ = n_special
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = summary_type
lowercase__ = use_proj
lowercase__ = scope
def lowerCamelCase__ (self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_input_lengths:
lowercase__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((lowercase__) , ) = result_with_labels.to_tuple()
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((lowercase__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = FlaubertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , ) -> str:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = FlaubertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
A__ = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__ (self : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def lowerCamelCase__ (self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = FlaubertModelTester(self )
lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , emb_dim=37 )
def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_UpperCAmelCase )
@slow
def lowerCamelCase__ (self : int ) -> List[Any]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = FlaubertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def lowerCamelCase__ (self : Dict ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ = torch.jit.trace(
_UpperCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """traced_model.pt""" ) )
lowercase__ = torch.jit.load(os.path.join(_UpperCAmelCase , """traced_model.pt""" ) , map_location=_UpperCAmelCase )
loaded(inputs_dict["""input_ids"""].to(_UpperCAmelCase ) , inputs_dict["""attention_mask"""].to(_UpperCAmelCase ) )
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase__ (self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ = model(_UpperCAmelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__ = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 15 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=UpperCAmelCase__ ):
UpperCAmelCase__ = ["onnx"]
def __init__( self : List[Any] , *__snake_case : Tuple , **__snake_case : Any ) -> List[str]:
requires_backends(self , ["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls : Dict , *__snake_case : List[Any] , **__snake_case : str ) -> Optional[int]:
requires_backends(cls , ["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls : List[Any] , *__snake_case : Dict , **__snake_case : str ) -> Any:
requires_backends(cls , ["""onnx"""] )
| 96 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase ( __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
lowercase__ = emb.weight.data
return lin_layer
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str="facebook/mbart-large-en-ro" , __magic_name__ : Any=False , __magic_name__ : int=False ) -> Optional[int]:
"""simple docstring"""
lowercase__ = torch.load(__magic_name__ , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(__magic_name__ )
lowercase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
lowercase__ = MBartConfig.from_pretrained(__magic_name__ , vocab_size=__magic_name__ )
if mbart_aa and finetuned:
lowercase__ = """relu"""
lowercase__ = state_dict["""decoder.embed_tokens.weight"""]
lowercase__ = MBartForConditionalGeneration(__magic_name__ )
model.model.load_state_dict(__magic_name__ )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
A : Any = parser.parse_args()
A : str = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 15 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "ylacombe/bark-small"
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = "en_speaker_1"
SCREAMING_SNAKE_CASE_ = "This is a test string"
SCREAMING_SNAKE_CASE_ = "speaker_embeddings_path.json"
SCREAMING_SNAKE_CASE_ = "speaker_embeddings"
def __A ( self : str , **__magic_name__ : Optional[int] ) -> str:
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def __A ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE_ = 35
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = {
"semantic_prompt": np.ones(_UpperCAmelCase ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE_ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , "file.npz" )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BarkProcessor(tokenizer=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = processor(text=self.input_string )
SCREAMING_SNAKE_CASE_ = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 140 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase ( __magic_name__ : Any ) -> int:
"""simple docstring"""
lowercase__ = model.config
lowercase__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase__ = MBartConfig(
is_decoder=__magic_name__ , is_encoder_decoder=__magic_name__ , add_cross_attention=__magic_name__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__magic_name__ , add_final_layer_norm=__magic_name__ , )
return encoder_config, decoder_config
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if "encoder.model" in name:
lowercase__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowercase__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowercase__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowercase__ = """encoder.""" + name
if "attn.proj" in name:
lowercase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowercase__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowercase__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowercase__ = """encoder.layernorm.bias"""
return name
def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : str ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
lowercase__ = key.split(""".""" )
lowercase__ = int(key_split[3] )
lowercase__ = int(key_split[5] )
lowercase__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[:dim]
lowercase__ = val[dim : dim * 2]
lowercase__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase__ = val
return orig_state_dict
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : List[Any]=None , __magic_name__ : Dict=False ) -> int:
"""simple docstring"""
lowercase__ = DonutModel.from_pretrained(__magic_name__ ).eval()
# load HuggingFace model
lowercase__ , lowercase__ = get_configs(__magic_name__ )
lowercase__ = DonutSwinModel(__magic_name__ )
lowercase__ = MBartForCausalLM(__magic_name__ )
lowercase__ = VisionEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
model.eval()
lowercase__ = original_model.state_dict()
lowercase__ = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# verify results on scanned document
lowercase__ = load_dataset("""hf-internal-testing/example-documents""" )
lowercase__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowercase__ = XLMRobertaTokenizerFast.from_pretrained(__magic_name__ , from_slow=__magic_name__ )
lowercase__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase__ = DonutProcessor(__magic_name__ , __magic_name__ )
lowercase__ = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowercase__ = """When is the coffee break?"""
lowercase__ = task_prompt.replace("""{user_input}""" , __magic_name__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowercase__ = original_model.decoder.tokenizer(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors="""pt""" )[
"""input_ids"""
]
lowercase__ = original_model.encoder.model.patch_embed(__magic_name__ )
lowercase__ , lowercase__ = model.encoder.embeddings(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
# verify encoder hidden states
lowercase__ = original_model.encoder(__magic_name__ )
lowercase__ = model.encoder(__magic_name__ ).last_hidden_state
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-2 )
# verify decoder hidden states
lowercase__ = original_model(__magic_name__ , __magic_name__ , __magic_name__ ).logits
lowercase__ = model(__magic_name__ , decoder_input_ids=__magic_name__ ).logits
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
A : Optional[int] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 15 | 0 |
'''simple docstring'''
def __snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
__UpperCAmelCase = str(bin(lowerCAmelCase ) )[2:] # remove the leading "0b"
__UpperCAmelCase = str(bin(lowerCAmelCase ) )[2:] # remove the leading "0b"
__UpperCAmelCase = max(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowerCAmelCase ) , b_binary.zfill(lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 396 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 15 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 646 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Any , _UpperCAmelCase : NestedDataStructureLike[PathLike] , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Optional[Features] = None , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ) -> List[str]:
"""simple docstring"""
super().__init__(
_UpperCAmelCase , split=_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase , data_files=_UpperCAmelCase , features=_UpperCAmelCase , **_UpperCAmelCase , )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , )
lowercase__ = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
| 15 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__A : Union[str, Any] = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__A : int = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__A : int = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def a__ ( self :List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def a__ ( self :Any ,_UpperCamelCase :str ,_UpperCamelCase :Any ):
snake_case_ : Dict = 0.0
for i, j in zip(_UpperCAmelCase ,_UpperCAmelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(_UpperCAmelCase ,_UpperCAmelCase ) else 0.0
snake_case_ : str = n_correct / len(_UpperCAmelCase )
return {
"accuracy": accuracy,
} | 334 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = """ylacombe/bark-small"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = """en_speaker_1"""
lowercase__ = """This is a test string"""
lowercase__ = """speaker_embeddings_path.json"""
lowercase__ = """speaker_embeddings"""
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase__ (self : str ) -> Tuple:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase__ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase__ = 35
lowercase__ = 2
lowercase__ = 8
lowercase__ = {
"""semantic_prompt""": np.ones(_UpperCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase__ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
lowercase__ = processor(text=self.input_string )
lowercase__ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 15 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __snake_case ( UpperCAmelCase__):
_lowerCAmelCase = '''poolformer'''
def __init__( self, A=3, A=16, A=16, A=3, A=4.0, A=[2, 2, 6, 2], A=[64, 128, 320, 512], A=[7, 3, 3, 3], A=[4, 2, 2, 2], A=[2, 1, 1, 1], A=4, A=0.0, A="gelu", A=True, A=1e-5, A=0.02, **A, ):
"""simple docstring"""
lowerCamelCase : List[Any] = num_channels
lowerCamelCase : Optional[Any] = patch_size
lowerCamelCase : int = stride
lowerCamelCase : Optional[Any] = padding
lowerCamelCase : int = pool_size
lowerCamelCase : Optional[Any] = hidden_sizes
lowerCamelCase : Tuple = mlp_ratio
lowerCamelCase : Tuple = depths
lowerCamelCase : List[str] = patch_sizes
lowerCamelCase : int = strides
lowerCamelCase : List[Any] = num_encoder_blocks
lowerCamelCase : Optional[Any] = drop_path_rate
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Optional[Any] = use_layer_scale
lowerCamelCase : int = layer_scale_init_value
lowerCamelCase : Optional[int] = initializer_range
super().__init__(**_UpperCAmelCase )
class __snake_case ( UpperCAmelCase__):
_lowerCAmelCase = version.parse('''1.11''')
@property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return 2e-3
| 320 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
A : Optional[Any] = logging.get_logger(__name__)
A : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Optional[int] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : int = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
A : Optional[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
A : Any = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
A : Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A : Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRContextEncoderTokenizer
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRQuestionEncoderTokenizer
A : Any = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A : Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A : int = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(UpperCAmelCase__ )
class A :
'''simple docstring'''
def __call__(self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Union[bool, str] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[bool] = None , **_UpperCAmelCase : Any , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
elif titles is None or texts is None:
lowercase__ = titles if texts is None else texts
return super().__call__(
_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = titles if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [titles]
lowercase__ = texts if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [texts]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = questions if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [questions] * n_passages
assert len(_UpperCAmelCase ) == len(
_UpperCAmelCase ), f'''There should be as many titles than texts but got {len(_UpperCAmelCase )} titles and {len(_UpperCAmelCase )} texts.'''
lowercase__ = super().__call__(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = super().__call__(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["""input_ids"""]
lowercase__ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_UpperCAmelCase , _UpperCAmelCase )
]
}
if return_attention_mask is not False:
lowercase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ = attention_mask
return self.pad(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : BatchEncoding , _UpperCAmelCase : DPRReaderOutput , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = reader_input["""input_ids"""]
lowercase__ , lowercase__ , lowercase__ = reader_output[:3]
lowercase__ = len(_UpperCAmelCase )
lowercase__ = sorted(range(_UpperCAmelCase ) , reverse=_UpperCAmelCase , key=relevance_logits.__getitem__ )
lowercase__ = []
for doc_id in sorted_docs:
lowercase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ = sequence_ids.index(self.pad_token_id )
else:
lowercase__ = len(_UpperCAmelCase )
lowercase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCAmelCase , top_spans=_UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCAmelCase , start_index=_UpperCAmelCase , end_index=_UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : int , _UpperCAmelCase : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowercase__ = []
for start_index, start_score in enumerate(_UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase__ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
lowercase__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowercase__ = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class A ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = READER_PRETRAINED_VOCAB_FILES_MAP
A__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = READER_PRETRAINED_INIT_CONFIGURATION
A__ = ['''input_ids''', '''attention_mask''']
A__ = DPRReaderTokenizer
| 15 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a ( A__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a ( A__ : int , A__ : str ) -> Optional[int]:
"""simple docstring"""
if args.student_type == "roberta":
_lowercase =False
elif args.student_type == "gpt2":
_lowercase =False
def a ( A__ : Tuple , A__ : Tuple ) -> Tuple:
"""simple docstring"""
if args.student_type == "roberta":
_lowercase =False
def a ( ) -> str:
"""simple docstring"""
_lowercase =argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=A__ , required=A__ , help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' , type=A__ , required=A__ , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=A__ , choices=['distilbert', 'roberta', 'gpt2'] , required=A__ , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=A__ , required=A__ , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=A__ , type=A__ , help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=A__ , help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' , type=A__ , required=A__ , help='The teacher model.' )
parser.add_argument('--temperature' , default=2.0 , type=A__ , help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=A__ , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=A__ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=A__ , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=A__ , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=A__ , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=A__ , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=A__ , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=A__ , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=A__ , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=A__ , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=A__ , help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=A__ , default=3 , help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' , type=A__ , default=5 , help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=A__ , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=A__ , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=A__ , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5e-4 , type=A__ , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1e-6 , type=A__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=A__ , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.02 , type=A__ , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=A__ , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=A__ , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=A__ , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=A__ , default=56 , help='Random seed' )
parser.add_argument('--log_interval' , type=A__ , default=500 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=A__ , default=4000 , help='Checkpoint interval.' )
_lowercase =parser.parse_args()
sanity_checks(A__ )
# ARGS #
init_gpu_params(A__ )
set_seed(A__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
' itUse `--force` if you want to overwrite it' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
json.dump(vars(A__ ) , A__ , indent=4 )
git_log(args.dump_path )
_lowercase , _lowercase , _lowercase =MODEL_CLASSES[args.student_type]
_lowercase , _lowercase , _lowercase =MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowercase =teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowercase ={}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowercase =tokenizer.all_special_tokens.index(A__ )
_lowercase =tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
_lowercase =special_tok_ids
_lowercase =tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , 'rb' ) as fp:
_lowercase =pickle.load(A__ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , 'rb' ) as fp:
_lowercase =pickle.load(A__ )
_lowercase =np.maximum(A__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_lowercase =0.0 # do not predict special tokens
_lowercase =torch.from_numpy(A__ )
else:
_lowercase =None
_lowercase =LmSeqsDataset(params=A__ , data=A__ )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
_lowercase =student_config_class.from_pretrained(args.student_config )
_lowercase =True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
_lowercase =student_model_class.from_pretrained(args.student_pretrained_weights , config=A__ )
else:
_lowercase =student_model_class(A__ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('Student loaded.' )
# TEACHER #
_lowercase =teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A__ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A__ , A__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A__ , A__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowercase =Distiller(
params=A__ , dataset=A__ , token_probs=A__ , student=A__ , teacher=A__ )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
| 291 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[int] ) -> list[int]: # This function is recursive
"""simple docstring"""
lowercase__ = len(__magic_name__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowercase__ = array[0]
lowercase__ = False
lowercase__ = 1
lowercase__ = []
while not is_found and i < array_length:
if array[i] < pivot:
lowercase__ = True
lowercase__ = [element for element in array[i:] if element >= array[i]]
lowercase__ = longest_subsequence(__magic_name__ )
if len(__magic_name__ ) > len(__magic_name__ ):
lowercase__ = temp_array
else:
i += 1
lowercase__ = [element for element in array[1:] if element >= pivot]
lowercase__ = [pivot, *longest_subsequence(__magic_name__ )]
if len(__magic_name__ ) > len(__magic_name__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
__lowerCAmelCase = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
__lowerCAmelCase = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
__lowerCAmelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__lowerCAmelCase = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__lowerCAmelCase = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __SCREAMING_SNAKE_CASE (UpperCAmelCase__ ):
"""simple docstring"""
_a : Any = VOCAB_FILES_NAMES
_a : Any = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
_a : Dict = DPRContextEncoderTokenizer
class __SCREAMING_SNAKE_CASE (UpperCAmelCase__ ):
"""simple docstring"""
_a : List[str] = VOCAB_FILES_NAMES
_a : Dict = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : Tuple = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_a : Tuple = DPRQuestionEncoderTokenizer
__lowerCAmelCase = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
__lowerCAmelCase = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__lowerCAmelCase = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(UpperCAmelCase__ )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
elif titles is None or texts is None:
a_ = titles if texts is None else texts
return super().__call__(
_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
a_ = titles if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [titles]
a_ = texts if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [texts]
a_ = len(_UpperCAmelCase )
a_ = questions if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [questions] * n_passages
assert len(_UpperCAmelCase ) == len(
_UpperCAmelCase ), f'There should be as many titles than texts but got {len(_UpperCAmelCase )} titles and {len(_UpperCAmelCase )} texts.'
a_ = super().__call__(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )['input_ids']
a_ = super().__call__(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )['input_ids']
a_ = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_UpperCAmelCase , _UpperCAmelCase )
]
}
if return_attention_mask is not False:
a_ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
a_ = attention_mask
return self.pad(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 16 , UpperCamelCase__ = 64 , UpperCamelCase__ = 4 , ):
"""simple docstring"""
a_ = reader_input['input_ids']
a_ , a_ , a_ = reader_output[:3]
a_ = len(_UpperCAmelCase )
a_ = sorted(range(_UpperCAmelCase ) , reverse=_UpperCAmelCase , key=relevance_logits.__getitem__ )
a_ = []
for doc_id in sorted_docs:
a_ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
a_ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
a_ = sequence_ids.index(self.pad_token_id )
else:
a_ = len(_UpperCAmelCase )
a_ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCAmelCase , top_spans=_UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCAmelCase , start_index=_UpperCAmelCase , end_index=_UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
"""simple docstring"""
a_ = []
for start_index, start_score in enumerate(_UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
a_ = sorted(_UpperCAmelCase , key=lambda UpperCamelCase__ : x[1] , reverse=_UpperCAmelCase )
a_ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
a_ = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class __SCREAMING_SNAKE_CASE (UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
_a : Union[str, Any] = VOCAB_FILES_NAMES
_a : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
_a : str = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION
_a : Optional[Any] = ['''input_ids''', '''attention_mask''']
_a : Optional[Any] = DPRReaderTokenizer
| 536 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A : List[Any] = None
A : Optional[Any] = logging.get_logger(__name__)
A : str = '▁'
A : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A : Any = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
A : Optional[int] = {
'google/pegasus-xsum': 5_1_2,
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = PegasusTokenizer
A__ = ['''input_ids''', '''attention_mask''']
def __init__(self : Tuple , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Union[str, Any]="<pad>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : Optional[Any]="<mask_2>" , _UpperCAmelCase : Optional[Any]="<mask_1>" , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Dict=103 , **_UpperCAmelCase : Dict , ) -> Dict:
"""simple docstring"""
lowercase__ = offset
if additional_special_tokens is not None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_UpperCAmelCase )}, but is'''
f''' {type(_UpperCAmelCase )}''' )
lowercase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_UpperCAmelCase ) , self.offset - 1 )
]
if len(set(_UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , pad_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , mask_token_sent=_UpperCAmelCase , offset=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List , _UpperCAmelCase : Optional[List] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(_UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase__ (self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ (self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
| 15 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ = '''swin'''
lowerCamelCase_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowercase=2_2_4 , lowercase=4 , lowercase=3 , lowercase=9_6 , lowercase=[2, 2, 6, 2] , lowercase=[3, 6, 1_2, 2_4] , lowercase=7 , lowercase=4.0 , lowercase=True , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase="gelu" , lowercase=False , lowercase=0.02 , lowercase=1E-5 , lowercase=3_2 , lowercase=None , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
A_ : List[Any] = image_size
A_ : List[str] = patch_size
A_ : Dict = num_channels
A_ : Union[str, Any] = embed_dim
A_ : Optional[int] = depths
A_ : str = len(_UpperCAmelCase )
A_ : Dict = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Any = qkv_bias
A_ : Tuple = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : List[str] = drop_path_rate
A_ : Union[str, Any] = hidden_act
A_ : List[Any] = use_absolute_embeddings
A_ : Tuple = layer_norm_eps
A_ : Tuple = initializer_range
A_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : Optional[int] = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) )
A_ : Dict = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(_UpperCAmelCase ) + 1 )]
A_ , A_ : Any = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ = version.parse('''1.11''' )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1E-4
| 558 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A : List[str] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
A : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int = CHRF.CHAR_ORDER , _UpperCAmelCase : int = CHRF.WORD_ORDER , _UpperCAmelCase : int = CHRF.BETA , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , ) -> int:
"""simple docstring"""
lowercase__ = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowercase__ = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
lowercase__ = CHRF(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__ = sb_chrf.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 15 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def _UpperCamelCase ( A , A , A , A , A ):
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(A ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
return min(
minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
def _UpperCamelCase ( ):
UpperCamelCase_ =[90, 23, 6, 33, 21, 65, 123, 34_423]
UpperCamelCase_ =math.log(len(A ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , A , A , A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 391 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def lowerCamelCase__ (self : Tuple , **_UpperCAmelCase : int ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = """</s>"""
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_UpperCAmelCase ) , 1103 )
def lowerCamelCase__ (self : str ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowercase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowercase__ = """To ensure a smooth flow of bank resolutions."""
lowercase__ = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowercase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase__ (self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = PegasusTokenizer
A__ = PegasusTokenizerFast
A__ = True
A__ = True
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PegasusTokenizer(_UpperCAmelCase , offset=0 , mask_token_sent=_UpperCAmelCase , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ (self : Any ) -> str:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Union[str, Any] ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
lowercase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowercase__ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase__ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_torch
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowercase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowercase__ = self._large_tokenizer(_UpperCAmelCase ).input_ids
self.assertListEqual(
_UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 15 | 0 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def UpperCAmelCase_ ( A , A , A , A ):
'''simple docstring'''
def constraint_to_multiple_of(A , A , A=0 , A=None ):
_a : Dict = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_a : Any = math.floor(val / multiple ) * multiple
if x < min_val:
_a : Union[str, Any] = math.ceil(val / multiple ) * multiple
return x
_a : Tuple = (output_size, output_size) if isinstance(A , A ) else output_size
_a , _a : str = get_image_size(A )
_a , _a : Optional[Any] = output_size
# determine new height and width
_a : Optional[int] = output_height / input_height
_a : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_a : int = scale_width
else:
# fit height
_a : str = scale_height
_a : int = constraint_to_multiple_of(scale_height * input_height , multiple=A )
_a : str = constraint_to_multiple_of(scale_width * input_width , multiple=A )
return (new_height, new_width)
class a ( UpperCAmelCase__ ):
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = PILImageResampling.BILINEAR , lowerCamelCase_ = False , lowerCamelCase_ = 1 , lowerCamelCase_ = True , lowerCamelCase_ = 1 / 2_5_5 , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
super().__init__(**_UpperCAmelCase )
_a : Optional[int] = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
_a : Dict = get_size_dict(_UpperCAmelCase )
_a : Optional[Any] = do_resize
_a : Optional[int] = size
_a : List[Any] = keep_aspect_ratio
_a : Union[str, Any] = ensure_multiple_of
_a : Any = resample
_a : Optional[int] = do_rescale
_a : Any = rescale_factor
_a : Optional[int] = do_normalize
_a : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = 1 , lowerCamelCase_ = PILImageResampling.BICUBIC , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
_a : Union[str, Any] = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_a : Dict = get_resize_output_image_size(
_UpperCAmelCase , output_size=(size['height'], size['width']) , keep_aspect_ratio=_UpperCAmelCase , multiple=_UpperCAmelCase , )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> int:
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> PIL.Image.Image:
_a : Dict = do_resize if do_resize is not None else self.do_resize
_a : str = size if size is not None else self.size
_a : List[str] = get_size_dict(_UpperCAmelCase )
_a : Tuple = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_a : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_a : int = resample if resample is not None else self.resample
_a : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : str = do_normalize if do_normalize is not None else self.do_normalize
_a : Dict = image_mean if image_mean is not None else self.image_mean
_a : Optional[int] = image_std if image_std is not None else self.image_std
_a : Dict = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_a : str = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
_a : List[Any] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
_a : List[Any] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
_a : Union[str, Any] = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
_a : Any = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
_a : str = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Dict:
_a : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_UpperCAmelCase ):
_a : Any = target_sizes.numpy()
_a : Optional[int] = []
for idx in range(len(_UpperCAmelCase ) ):
_a : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_UpperCAmelCase )
_a : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_UpperCAmelCase )
else:
_a : str = logits.argmax(dim=1 )
_a : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 120 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase__ = load_file(__magic_name__ )
lowercase__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.text_encoder
else:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.unet
# find the target layer
lowercase__ = layer_infos.pop(0 )
while len(__magic_name__ ) > -1:
try:
lowercase__ = curr_layer.__getattr__(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase__ = layer_infos.pop(0 )
elif len(__magic_name__ ) == 0:
break
except Exception:
if len(__magic_name__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase__ = layer_infos.pop(0 )
lowercase__ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(__magic_name__ )
else:
pair_keys.append(__magic_name__ )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase__ = state_dict[pair_keys[0]].to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ )
# update visited list
for item in pair_keys:
visited.append(__magic_name__ )
return pipeline
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
A : str = parser.parse_args()
A : Tuple = args.base_model_path
A : List[str] = args.checkpoint_path
A : Optional[int] = args.dump_path
A : Optional[int] = args.lora_prefix_unet
A : Any = args.lora_prefix_text_encoder
A : Any = args.alpha
A : List[str] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
A : int = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 15 | 0 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ['text', 'image', 'audio']
def a ( __UpperCAmelCase : List[str] ) -> Optional[Any]:
__magic_name__: Any = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
inputs.append(create_inputs(__UpperCAmelCase ) )
else:
raise ValueError(f'Invalid type requested: {input_type}' )
return inputs
def a ( __UpperCAmelCase : List ) -> Tuple:
__magic_name__: Union[str, Any] = []
for output in outputs:
if isinstance(__UpperCAmelCase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__UpperCAmelCase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__UpperCAmelCase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'Invalid output: {output}' )
return output_types
@is_tool_test
class __A :
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
__magic_name__: Any = self.tool.inputs
for _input in inputs:
if isinstance(_input , _UpperCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
__magic_name__: int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : List[Any] ) -> int:
__magic_name__: Union[str, Any] = create_inputs(self.tool.inputs )
__magic_name__: Dict = self.tool(*_UpperCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
__magic_name__: Dict = [outputs]
self.assertListEqual(output_types(_UpperCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : Any ) -> Any:
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
__magic_name__: List[str] = create_inputs(self.tool.inputs )
__magic_name__: Optional[Any] = self.tool(*_UpperCAmelCase )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__magic_name__: int = [outputs]
self.assertEqual(len(_UpperCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(_UpperCAmelCase , self.tool.outputs ):
__magic_name__: Union[str, Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
__magic_name__: Dict = create_inputs(self.tool.inputs )
__magic_name__: Union[str, Any] = []
for _input, input_type in zip(_UpperCAmelCase , self.tool.inputs ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
__magic_name__: List[str] = self.tool(*_UpperCAmelCase )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__magic_name__: Optional[int] = [outputs]
self.assertEqual(len(_UpperCAmelCase ) , len(self.tool.outputs ) )
| 96 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Tuple = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''ibert'''
def __init__(self : int , _UpperCAmelCase : Union[str, Any]=3_0522 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[Any]=3072 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=1E-1_2 , _UpperCAmelCase : int=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : int="absolute" , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]="none" , **_UpperCAmelCase : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = quant_mode
lowercase__ = force_dequant
class A ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def lowerCamelCase__ (self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 15 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase (UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase__ = (UnCLIPScheduler,)
def __A ( self : Union[str, Any] , **__magic_name__ : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = {
"num_train_timesteps": 1_000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**_UpperCAmelCase )
return config
def __A ( self : Union[str, Any] ) -> str:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def __A ( self : Tuple ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def __A ( self : Tuple ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def __A ( self : Tuple ) -> Any:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_UpperCAmelCase )
def __A ( self : str ) -> List[Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def __A ( self : str ) -> Union[str, Any]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_UpperCAmelCase , prev_timestep=_UpperCAmelCase )
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(variance_type="fixed_small_log" )
SCREAMING_SNAKE_CASE_ = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def __A ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(variance_type="learned_range" )
SCREAMING_SNAKE_CASE_ = scheduler_class(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = 0.5
assert scheduler._get_variance(1 , predicted_variance=_UpperCAmelCase ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_UpperCAmelCase ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_UpperCAmelCase ) - -0.001_0011 < 1e-5
def __A ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = scheduler.timesteps
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
for i, t in enumerate(_UpperCAmelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE_ = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE_ = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE_ = pred_prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def __A ( self : str ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE_ = scheduler.timesteps
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
for i, t in enumerate(_UpperCAmelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE_ = model(_UpperCAmelCase , _UpperCAmelCase )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE_ = None
else:
SCREAMING_SNAKE_CASE_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE_ = scheduler.step(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , prev_timestep=_UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE_ = pred_prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def __A ( self : Optional[int] ) -> int:
pass
def __A ( self : List[str] ) -> str:
pass
| 140 |
from math import log
from scipy.constants import Boltzmann, physical_constants
A : Any = 3_0_0 # TEMPERATURE (unit = K)
def UpperCamelCase ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
'''simple docstring'''
from __future__ import annotations
def __snake_case ( lowerCAmelCase : list[int] , lowerCAmelCase : list[int] , lowerCAmelCase : int ):
__UpperCAmelCase = list(range(len(lowerCAmelCase ) ) )
__UpperCAmelCase = [v / w for v, w in zip(lowerCAmelCase , lowerCAmelCase )]
index.sort(key=lambda lowerCAmelCase : ratio[i] , reverse=lowerCAmelCase )
__UpperCAmelCase = 0
__UpperCAmelCase = [0] * len(lowerCAmelCase )
for i in index:
if weight[i] <= capacity:
__UpperCAmelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
__UpperCAmelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 396 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A :
'''simple docstring'''
A__ = 42
A__ = None
A__ = None
def UpperCamelCase ( ) -> Node | None:
"""simple docstring"""
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase ( __magic_name__ : Node | None ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None , __magic_name__ : int ) -> Sequence[Node | None]:
"""simple docstring"""
lowercase__ = []
def populate_output(__magic_name__ : Node | None , __magic_name__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__magic_name__ , __magic_name__ )
return output
def UpperCamelCase ( __magic_name__ : Node | None ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(__magic_name__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__magic_name__ , __magic_name__ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(__magic_name__ , __magic_name__ ) )
lowercase__ = 0
return output
def UpperCamelCase ( ) -> None: # Main function for testing.
"""simple docstring"""
lowercase__ = make_tree()
print(f'''In-order Traversal: {inorder(__magic_name__ )}''' )
print(f'''Pre-order Traversal: {preorder(__magic_name__ )}''' )
print(f'''Post-order Traversal: {postorder(__magic_name__ )}''' , """\n""" )
print(f'''Height of Tree: {height(__magic_name__ )}''' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(__magic_name__ ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(__magic_name__ ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(__magic_name__ , level=__magic_name__ ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 15 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.