code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(
__A , r'''\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ''' , )
class __UpperCAmelCase ( __A ):
'''simple docstring'''
def A (self : int , _lowerCAmelCase : Optional[int] ):
if self.framework == "tf":
A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
A = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__lowercase )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def A (self : Dict , _lowerCAmelCase : List[Any] ):
A = self.get_masked_index(__lowercase )
A = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def A (self : List[str] , _lowerCAmelCase : Tuple ):
if isinstance(__lowercase , __lowercase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__lowercase )
def A (self : str , _lowerCAmelCase : int , _lowerCAmelCase : List[str]=None , **_lowerCAmelCase : Optional[Any] ):
if return_tensors is None:
A = self.framework
A = self.tokenizer(__lowercase , return_tensors=__lowercase )
self.ensure_exactly_one_mask_token(__lowercase )
return model_inputs
def A (self : int , _lowerCAmelCase : Optional[int] ):
A = self.model(**__lowercase )
A = model_inputs["""input_ids"""]
return model_outputs
def A (self : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=5 , _lowerCAmelCase : Tuple=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
A = target_ids.shape[0]
A = model_outputs["""input_ids"""][0]
A = model_outputs["""logits"""]
if self.framework == "tf":
A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
A = outputs.numpy()
A = outputs[0, masked_index, :]
A = stable_softmax(__lowercase , axis=-1 )
if target_ids is not None:
A = tf.gather_nd(tf.squeeze(__lowercase , 0 ) , target_ids.reshape(-1 , 1 ) )
A = tf.expand_dims(__lowercase , 0 )
A = tf.math.top_k(__lowercase , k=__lowercase )
A , A = topk.values.numpy(), topk.indices.numpy()
else:
A = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__lowercase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
A = outputs[0, masked_index, :]
A = logits.softmax(dim=-1 )
if target_ids is not None:
A = probs[..., target_ids]
A , A = probs.topk(__lowercase )
A = []
A = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
A = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
A = input_ids.numpy().copy()
if target_ids is not None:
A = target_ids[p].tolist()
A = p
# Filter padding out:
A = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
A = self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
A = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(__lowercase )
result.append(__lowercase )
if single_mask:
return result[0]
return result
def A (self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=None ):
if isinstance(__lowercase , __lowercase ):
A = [targets]
try:
A = self.tokenizer.get_vocab()
except Exception:
A = {}
A = []
for target in targets:
A = vocab.get(__lowercase , __lowercase )
if id_ is None:
A = self.tokenizer(
__lowercase , add_special_tokens=__lowercase , return_attention_mask=__lowercase , return_token_type_ids=__lowercase , max_length=1 , truncation=__lowercase , )["""input_ids"""]
if len(__lowercase ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
A = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
A = list(set(__lowercase ) )
if len(__lowercase ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
A = np.array(__lowercase )
return target_ids
def A (self : Tuple , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : str=None ):
A = {}
if targets is not None:
A = self.get_target_ids(__lowercase , __lowercase )
A = target_ids
if top_k is not None:
A = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__(self : Tuple , _lowerCAmelCase : Dict , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any ):
A = super().__call__(__lowercase , **__lowercase )
if isinstance(__lowercase , __lowercase ) and len(__lowercase ) == 1:
return outputs[0]
return outputs
| 359 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
else:
return a * actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(UpperCAmelCase , UpperCAmelCase )
return actual_power(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
| 337 | 0 |
'''simple docstring'''
import math
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
A = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__lowerCamelCase )
if number < 1:
A = f"""Input value of [number={number}] must be > 0"""
raise ValueError(__lowerCamelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
A = int(math.log(number // 3 , 2 ) ) + 2
A = [3, 5]
A = 2
A = 3
for block in range(1 , __lowerCamelCase ):
for _ in range(__lowerCamelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
_lowerCamelCase : Tuple = 0
try:
_lowerCamelCase : List[str] = proth(number)
except ValueError:
print(f"ValueError: there is no {number}th Proth number")
continue
print(f"The {number}th Proth number: {value}")
| 360 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
if isinstance(UpperCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __UpperCAmelCase :
'''simple docstring'''
def A (self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
pass
def A (self : List[str] ):
pass
def A (self : Union[str, Any] ):
pass
def A (self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ):
A = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = {"""vision_model""": vision_model, """text_model""": text_model}
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = after_output[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def A (self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ):
A = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def A (self : List[str] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def A (self : Optional[int] ):
A = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def A (self : List[Any] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def A (self : Tuple ):
A , A = self.get_pretrained_model_and_inputs()
A = model_a(**_lowerCAmelCase )
A = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model_a(**_lowerCAmelCase )
A = after_outputs[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : int ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
A = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Union[str, Any] ):
A = TFViTModelTester(self )
A = TFBertModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : str ):
A = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : str ):
A = TFDeiTModelTester(self )
A = TFRobertaModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Dict ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Optional[Any] ):
A = TFCLIPVisionModelTester(self )
A = TFBertModelTester(self )
A = clip_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : Any ):
A = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
A = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
A = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 337 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = (EulerDiscreteScheduler,)
__lowerCAmelCase = 10
def A (self : List[Any] , **_lowerCAmelCase : List[Any] ) -> int:
A = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_a )
return config
def A (self : Union[str, Any] ) -> Any:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def A (self : Union[str, Any] ) -> List[Any]:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def A (self : int ) -> str:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def A (self : Dict ) -> Tuple:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def A (self : Dict ) -> Any:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
A = torch.manual_seed(0 )
A = self.dummy_model()
A = self.dummy_sample_deter * scheduler.init_noise_sigma
A = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
A = scheduler.scale_model_input(_a , _a )
A = model(_a , _a )
A = scheduler.step(_a , _a , _a , generator=_a )
A = output.prev_sample
A = torch.sum(torch.abs(_a ) )
A = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def A (self : Any ) -> List[str]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config(prediction_type="""v_prediction""" )
A = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
A = torch.manual_seed(0 )
A = self.dummy_model()
A = self.dummy_sample_deter * scheduler.init_noise_sigma
A = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
A = scheduler.scale_model_input(_a , _a )
A = model(_a , _a )
A = scheduler.step(_a , _a , _a , generator=_a )
A = output.prev_sample
A = torch.sum(torch.abs(_a ) )
A = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 0.0_002 ) < 1e-2
assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3
def A (self : int ) -> Any:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
A = torch.manual_seed(0 )
A = self.dummy_model()
A = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A = sample.to(_a )
for t in scheduler.timesteps:
A = scheduler.scale_model_input(_a , _a )
A = model(_a , _a )
A = scheduler.step(_a , _a , _a , generator=_a )
A = output.prev_sample
A = torch.sum(torch.abs(_a ) )
A = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def A (self : Optional[Any] ) -> Optional[Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
A = torch.manual_seed(0 )
A = self.dummy_model()
A = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A = sample.to(_a )
for t in scheduler.timesteps:
A = scheduler.scale_model_input(_a , _a )
A = model(_a , _a )
A = scheduler.step(_a , _a , _a , generator=_a )
A = output.prev_sample
A = torch.sum(torch.abs(_a ) )
A = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
| 361 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Any = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : List[str] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_lowerCamelCase : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_lowerCamelCase : Dict = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCamelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCamelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCamelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCamelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_MAPPING
_lowerCamelCase : Optional[Any] = auto_class_update(FlaxAutoModel)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCamelCase : str = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowerCamelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 337 | 0 |
'''simple docstring'''
_lowerCamelCase : Optional[Any] = {
'km/h': 1.0,
'm/s': 3.6,
'mph': 1.6_0_9_3_4_4,
'knot': 1.8_5_2,
}
_lowerCamelCase : List[Any] = {
'km/h': 1.0,
'm/s': 0.2_7_7_7_7_7_7_7_8,
'mph': 0.6_2_1_3_7_1_1_9_2,
'knot': 0.5_3_9_9_5_6_8_0_3,
}
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
A = (
f"""Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n"""
f"""Valid values are: {", ".join(lowercase__ )}"""
)
raise ValueError(lowercase__ )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def A (self : Any ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def A (self : List[str] ):
pass
@slow
@require_torch
def A (self : int ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
A = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
A = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def A (self : Tuple ):
pass
| 337 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase ( metaclass=__lowercase ):
'''simple docstring'''
__lowerCAmelCase = ['''transformers''', '''torch''', '''note_seq''']
def __init__(self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def A (cls : Any , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def A (cls : Union[str, Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 363 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCamelCase : Dict = 'src/diffusers'
_lowerCamelCase : Dict = '.'
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCamelCase : List[str] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCamelCase : Tuple = spec.loader.load_module()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCAmelCase ) is not None
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = object_name.split(""".""" )
A = 0
# First let's find the module where our object lives.
A = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , f"""{module}.py""" ) ):
i += 1
if i < len(UpperCAmelCase ):
A = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(UpperCAmelCase , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
# Now let's find the class / func in the code!
A = """"""
A = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
return "".join(UpperCAmelCase )
_lowerCamelCase : str = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_lowerCamelCase : Any = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_lowerCamelCase : str = re.compile(R'<FILL\s+[^>]*>')
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = code.split("""\n""" )
A = 0
while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = len(get_indent(UpperCAmelCase ) ) > 0
if has_indent:
A = f"""class Bla:\n{code}"""
A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase )
A = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
A , A = style_docstrings_in_code(UpperCAmelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
with open(UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
A = []
A = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase ):
A = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A = search.groups()
A = find_code_in_diffusers(UpperCAmelCase )
A = get_indent(UpperCAmelCase )
A = line_index + 1 if indent == theoretical_indent else line_index + 2
A = theoretical_indent
A = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A = True
while line_index < len(UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase ):
break
A = lines[line_index]
A = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(f"""^{indent}# End copy""" , UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
A = """""".join(UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
A = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase ) is None]
A = """\n""".join(UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase ) > 0:
A = replace_pattern.replace("""with""" , """""" ).split(""",""" )
A = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A = pattern.groups()
A = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if option.strip() == "all-casing":
A = re.sub(obja.lower() , obja.lower() , UpperCAmelCase )
A = re.sub(obja.upper() , obja.upper() , UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A = blackify(lines[start_index - 1] + theoretical_code )
A = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A = lines[:start_index] + [theoretical_code] + lines[line_index:]
A = start_index + 1
if overwrite and len(UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase )
return diffs
def __a ( UpperCAmelCase = False ) ->int:
"""simple docstring"""
A = glob.glob(os.path.join(UpperCAmelCase , """**/*.py""" ) , recursive=UpperCAmelCase )
A = []
for filename in all_files:
A = is_copy_consistent(UpperCAmelCase , UpperCAmelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(UpperCAmelCase ) > 0:
A = """\n""".join(UpperCAmelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCamelCase : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 337 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __a ( UpperCAmelCase , UpperCAmelCase=10 ) ->Optional[int]:
"""simple docstring"""
A = []
for _ in range(a_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __a ( UpperCAmelCase , UpperCAmelCase=10 ) ->Optional[Any]:
"""simple docstring"""
A = []
for step in range(a_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(a_ , """schedule.bin""" )
torch.save(scheduler.state_dict() , a_ )
A = torch.load(a_ )
scheduler.load_state_dict(a_ )
return lrs
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : int ):
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for a, b in zip(lowercase_ , lowercase_ ):
self.assertAlmostEqual(lowercase_ , lowercase_ , delta=lowercase_ )
def A (self : Optional[int] ):
A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowercase_ )
A = torch.tensor([0.4, 0.2, -0.5] )
A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
A = criterion(lowercase_ , lowercase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def A (self : int ):
A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowercase_ )
A = torch.tensor([0.4, 0.2, -0.5] )
A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowercase_ , weight_decay=0.0 , relative_step=lowercase_ , scale_parameter=lowercase_ , warmup_init=lowercase_ , )
for _ in range(1000 ):
A = criterion(lowercase_ , lowercase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = nn.Linear(50 , 50 ) if is_torch_available() else None
__lowerCAmelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
__lowerCAmelCase = 10
def A (self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=None ):
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for a, b in zip(lowercase_ , lowercase_ ):
self.assertAlmostEqual(lowercase_ , lowercase_ , delta=lowercase_ , msg=lowercase_ )
def A (self : Dict ):
A = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A , A = data
A = scheduler_func(self.optimizer , **lowercase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A = unwrap_schedule(lowercase_ , self.num_steps )
self.assertListAlmostEqual(
lowercase_ , lowercase_ , tol=1e-2 , msg=F"""failed for {scheduler_func} in normal scheduler""" , )
A = scheduler_func(self.optimizer , **lowercase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(lowercase_ ) # wrap to test picklability of the schedule
A = unwrap_and_save_reload_schedule(lowercase_ , self.num_steps )
self.assertListEqual(lowercase_ , lowercase_ , msg=F"""failed for {scheduler_func} in save and reload""" )
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : Optional[Any] ):
A = fn
def __call__(self : int , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : List[str] ):
return self.fn(*lowercase_ , **lowercase_ )
@classmethod
def A (self : List[str] , _lowerCAmelCase : List[Any] ):
A = list(map(self , scheduler.lr_lambdas ) )
| 364 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = credit_card_number
A = 0
A = len(UpperCAmelCase ) - 2
for i in range(UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
A = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
A = cc_number[:i] + str(UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = f"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(f"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(UpperCAmelCase ) <= 16:
print(f"""{error_message} of its length.""" )
return False
if not validate_initial_digits(UpperCAmelCase ):
print(f"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(UpperCAmelCase ):
print(f"""{error_message} it fails the Luhn check.""" )
return False
print(f"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 337 | 0 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->list:
"""simple docstring"""
if len(__a ) <= 1:
return [tuple(__a )]
A = []
def generate(UpperCAmelCase , UpperCAmelCase ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , __a )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
A = arr[k - 1], arr[i]
else: # k is odd
A = arr[k - 1], arr[0]
generate(k - 1 , __a )
generate(len(__a ) , __a )
return res
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase : int = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 365 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : List[Any] ):
A = str(id_ )
A = None
A = None
A = []
A = {} # {vertex:distance}
def __lt__(self : List[Any] , _lowerCAmelCase : Tuple ):
return self.key < other.key
def __repr__(self : str ):
return self.id
def A (self : Union[str, Any] , _lowerCAmelCase : List[str] ):
self.neighbors.append(_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
A = weight
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , UpperCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->list:
"""simple docstring"""
A = []
for u in graph:
A = math.inf
A = None
A = 0
A = graph[:]
while q:
A = min(UpperCAmelCase )
q.remove(UpperCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
for i in range(1 , len(UpperCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Iterator[tuple]:
"""simple docstring"""
for u in graph:
A = math.inf
A = None
A = 0
A = list(UpperCAmelCase )
hq.heapify(UpperCAmelCase )
while h:
A = hq.heappop(UpperCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
hq.heapify(UpperCAmelCase )
for i in range(1 , len(UpperCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __a ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Optional[int]=7 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[Any]=99 , _lowerCAmelCase : Dict=32 , _lowerCAmelCase : List[Any]=5 , _lowerCAmelCase : List[Any]=4 , _lowerCAmelCase : Any=37 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : List[Any]=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : List[Any]=4 , _lowerCAmelCase : Optional[int]=None , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = scope
def A (self : Tuple ):
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A = ids_tensor([self.batch_size] , self.num_choices )
A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A (self : Optional[int] ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__UpperCAmelCase , )
def A (self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[str] ):
A = FalconModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
A = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A (self : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , ):
A = True
A = FalconModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A (self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , ):
A = FalconForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A (self : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , ):
A = True
A = True
A = FalconForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
A = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) , config.vocab_size )
A = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A = torch.cat([input_ids, next_tokens] , dim=-1 )
A = torch.cat([input_mask, next_mask] , dim=-1 )
A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
A = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
# select random slice
A = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A = output_from_no_past[:, -3:, random_slice_idx].detach()
A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def A (self : List[str] ):
A = self.prepare_config_and_inputs()
(
A
) = config_and_inputs
A = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (FalconForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : Tuple ):
A = FalconModelTester(self )
A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def A (self : Union[str, Any] ):
self.config_tester.run_common_tests()
def A (self : List[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def A (self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
A = alibi
self.model_tester.create_and_check_model(__UpperCAmelCase , *__UpperCAmelCase )
def A (self : List[Any] ):
A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = input_dict["""input_ids"""]
A = input_ids.ne(1 ).to(__UpperCAmelCase )
A = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A (self : Dict ):
A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = """single_label_classification"""
A = input_dict["""input_ids"""]
A = input_ids.ne(1 ).to(__UpperCAmelCase )
A = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A (self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs_for_common()
A = input_dict["""input_ids"""]
A = FalconForCausalLM(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
A = input_ids.shape[0]
A = model._convert_to_rw_cache(result.past_key_values )
A = model._convert_cache_to_standard_format(__UpperCAmelCase , __UpperCAmelCase )
for layer in range(len(__UpperCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def A (self : List[Any] ):
A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = """multi_label_classification"""
A = input_dict["""input_ids"""]
A = input_ids.ne(1 ).to(__UpperCAmelCase )
A = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A (self : Tuple ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
A = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__UpperCAmelCase , """use_cache""" ):
return
A = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
if "use_cache" not in inputs:
A = True
A = model(**__UpperCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
A = (
getattr(__UpperCAmelCase , """decoder_layers""" , __UpperCAmelCase )
or getattr(__UpperCAmelCase , """num_decoder_layers""" , __UpperCAmelCase )
or config.num_hidden_layers
)
A = getattr(__UpperCAmelCase , """num_kv_heads""" , config.num_attention_heads )
A = getattr(__UpperCAmelCase , """d_model""" , config.hidden_size )
A = embed_dim // num_attention_heads
A = outputs["""past_key_values"""]
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
A = inputs["""input_ids"""].shape
for i in range(__UpperCAmelCase ):
if config.new_decoder_architecture:
A = config.num_attention_heads
elif config.multi_query:
A = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : Tuple ):
A = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
A = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(__UpperCAmelCase )
A = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase )
A = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
A = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=19 )
A = tokenizer.batch_decode(__UpperCAmelCase )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def A (self : int ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
A = AutoTokenizer.from_pretrained(__UpperCAmelCase )
A = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
model.eval()
model.to(__UpperCAmelCase )
A = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
model.generate(**__UpperCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def A (self : Union[str, Any] ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
A = AutoTokenizer.from_pretrained(__UpperCAmelCase )
A = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
model.eval()
model.to(device=__UpperCAmelCase )
A = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase )
# Test results are the same with and without cache
A = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=20 , use_cache=__UpperCAmelCase )
A = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=20 , use_cache=__UpperCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 366 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''perceiver'''
def __init__(self : Dict , _lowerCAmelCase : List[str]=256 , _lowerCAmelCase : Any=1280 , _lowerCAmelCase : Dict=768 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Optional[int]=26 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]="kv" , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Any=1e-12 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : int=262 , _lowerCAmelCase : int=2048 , _lowerCAmelCase : int=56 , _lowerCAmelCase : List[Any]=[368, 496] , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : Any=1920 , _lowerCAmelCase : Optional[int]=16 , _lowerCAmelCase : List[Any]=[1, 16, 224, 224] , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
A = num_latents
A = d_latents
A = d_model
A = num_blocks
A = num_self_attends_per_block
A = num_self_attention_heads
A = num_cross_attention_heads
A = qk_channels
A = v_channels
A = cross_attention_shape_for_attention
A = self_attention_widening_factor
A = cross_attention_widening_factor
A = hidden_act
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = use_query_residual
# masked language modeling attributes
A = vocab_size
A = max_position_embeddings
# image classification attributes
A = image_size
# flow attributes
A = train_size
# multimodal autoencoding attributes
A = num_frames
A = audio_samples_per_frame
A = samples_per_patch
A = output_shape
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
def A (self : List[str] ):
if self.task == "multiple-choice":
A = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def A (self : Dict ):
return 1e-4
def A (self : List[Any] , _lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = preprocessor.num_special_tokens_to_add(_lowerCAmelCase )
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
A = [""" """.join(["""a"""] ) * seq_length] * batch_size
A = dict(preprocessor(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""input_ids""" )
return inputs
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
A = self._generate_dummy_images(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A = dict(preprocessor(images=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 337 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def __a ( UpperCAmelCase="ro" , UpperCAmelCase="en" , UpperCAmelCase="wmt16" , UpperCAmelCase=None ) ->Tuple:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
A = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
A = datasets.load_dataset(a_ , a_ )
if save_dir is None:
A = f"""{dataset}-{pair}"""
A = Path(a_ )
save_dir.mkdir(exist_ok=a_ )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
A = "val" if split == "validation" else split
A = save_dir.joinpath(f"""{fn}.source""" )
A = save_dir.joinpath(f"""{fn}.target""" )
A = src_path.open("""w+""" )
A = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
A = x["translation"]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 367 |
'''simple docstring'''
import math
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
A = n
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # adjacency matrix for weight
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
A = w
def A (self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def A (self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 0 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self : Tuple , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
A = {}
def A (self : List[str] , _lowerCAmelCase : Union[str, Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : Dict ):
A = super().add_tokens(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
""" `placeholder_token` that is not already in the tokenizer.""" )
def A (self : Optional[int] , _lowerCAmelCase : List[str] , *_lowerCAmelCase : Optional[int] , _lowerCAmelCase : int=1 , **_lowerCAmelCase : List[str] ):
A = []
if num_vec_per_token == 1:
self.try_adding_tokens(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
output.append(_SCREAMING_SNAKE_CASE )
else:
A = []
for i in range(_SCREAMING_SNAKE_CASE ):
A = placeholder_token + F"""_{i}"""
self.try_adding_tokens(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
output.append(_SCREAMING_SNAKE_CASE )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
A = output
def A (self : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Optional[int]=1.0 ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_SCREAMING_SNAKE_CASE ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A = self.token_map[placeholder_token]
A = tokens[: 1 + int(len(_SCREAMING_SNAKE_CASE ) * prop_tokens_to_load )]
if vector_shuffle:
A = copy.copy(_SCREAMING_SNAKE_CASE )
random.shuffle(_SCREAMING_SNAKE_CASE )
A = text.replace(_SCREAMING_SNAKE_CASE , """ """.join(_SCREAMING_SNAKE_CASE ) )
return text
def __call__(self : Union[str, Any] , _lowerCAmelCase : Tuple , *_lowerCAmelCase : Optional[int] , _lowerCAmelCase : str=False , _lowerCAmelCase : List[str]=1.0 , **_lowerCAmelCase : Optional[int] ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
_SCREAMING_SNAKE_CASE , vector_shuffle=_SCREAMING_SNAKE_CASE , prop_tokens_to_load=_SCREAMING_SNAKE_CASE ) , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def A (self : Any , _lowerCAmelCase : List[str] , *_lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : List[str]=1.0 , **_lowerCAmelCase : List[Any] ):
return super().encode(
self.replace_placeholder_tokens_in_text(
_SCREAMING_SNAKE_CASE , vector_shuffle=_SCREAMING_SNAKE_CASE , prop_tokens_to_load=_SCREAMING_SNAKE_CASE ) , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
| 368 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : List[str] = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : List[str] = {
'Salesforce/codegen-350M-mono': 2048,
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = CodeGenTokenizer
def __init__(self : int , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[Any]="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Any=False , **_lowerCAmelCase : Optional[int] , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
if kwargs.pop("""add_bos_token""" , _lowerCAmelCase ):
A = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
A = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
A = add_prefix_space
A = pre_tok_class(**_lowerCAmelCase )
A = add_prefix_space
def A (self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def A (self : Tuple , _lowerCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[List[str]] = None , **_lowerCAmelCase : Tuple , ):
A = super().decode(
token_ids=_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase , **_lowerCAmelCase , )
if truncate_before_pattern is not None and len(_lowerCAmelCase ) > 0:
A = self.truncate(_lowerCAmelCase , _lowerCAmelCase )
return decoded_text
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
def find_re(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
A = pattern.search(_lowerCAmelCase , _lowerCAmelCase )
return m.start() if m else -1
A = [re.compile(_lowerCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
A = list(re.finditer("""^print""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: prints[1].start()]
A = list(re.finditer("""^def""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: defs[1].start()]
A = 0
A = [
pos for pos in [find_re(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for terminal in terminals] if pos != -1
]
if len(_lowerCAmelCase ) > 0:
return completion[: min(_lowerCAmelCase )]
else:
return completion
| 337 | 0 |
_lowerCamelCase : Dict = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Optional[Any] = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 337 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __a ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
for char in word:
A = ord(__SCREAMING_SNAKE_CASE )
if not _is_chinese_char(__SCREAMING_SNAKE_CASE ):
return 0
return 1
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
A = set()
for token in tokens:
A = len(__SCREAMING_SNAKE_CASE ) > 1 and is_chinese(__SCREAMING_SNAKE_CASE )
if chinese_word:
word_set.add(__SCREAMING_SNAKE_CASE )
A = list(__SCREAMING_SNAKE_CASE )
return word_list
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
A = max([len(__SCREAMING_SNAKE_CASE ) for w in chinese_word_set] )
A = bert_tokens
A = 0, len(__SCREAMING_SNAKE_CASE )
while start < end:
A = True
if is_chinese(bert_word[start] ):
A = min(end - start , __SCREAMING_SNAKE_CASE )
for i in range(__SCREAMING_SNAKE_CASE , 1 , -1 ):
A = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
A = """##""" + bert_word[j]
A = start + i
A = False
break
if single_word:
start += 1
return bert_word
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
A = []
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , 100 ):
A = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
A = [get_chinese_word(__SCREAMING_SNAKE_CASE ) for r in res]
ltp_res.extend(__SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE )
A = []
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , 100 ):
A = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE )
A = []
for input_ids, chinese_word in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
A = []
for id in input_ids:
A = bert_tokenizer._convert_id_to_token(__SCREAMING_SNAKE_CASE )
input_tokens.append(__SCREAMING_SNAKE_CASE )
A = add_sub_symbol(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
A = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__SCREAMING_SNAKE_CASE ):
if token[:2] == "##":
A = token[2:]
# save chinese tokens' pos
if len(__SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(__SCREAMING_SNAKE_CASE ) ):
ref_id.append(__SCREAMING_SNAKE_CASE )
ref_ids.append(__SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE )
return ref_ids
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
A = f.readlines()
A = [line.strip() for line in data if len(__SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A = LTP(args.ltp ) # faster in GPU device
A = BertTokenizer.from_pretrained(args.bert )
A = prepare_ref(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
A = [json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" for ref in ref_ids]
f.writelines(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
_lowerCamelCase : str = parser.parse_args()
main(args)
| 370 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 337 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __a ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
A = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = emb.weight.shape
A = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ , bias=UpperCAmelCase__ )
A = emb.weight.data
return lin_layer
def __a ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
A = torch.load(UpperCAmelCase__ , map_location="""cpu""" )
A = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
A = mam_aaa["""model"""]
remove_ignore_keys_(UpperCAmelCase__ )
A = state_dict["""encoder.embed_tokens.weight"""].shape[0]
A = MaMaaaConfig(
vocab_size=UpperCAmelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
A = state_dict["""decoder.embed_tokens.weight"""]
A = MaMaaaForConditionalGeneration(UpperCAmelCase__ )
model.model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_lowerCamelCase : Optional[int] = parser.parse_args()
_lowerCamelCase : Optional[int] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 371 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Tuple , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Dict ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Any ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Dict ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 337 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def __a ( UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( ) ->List[Any]:
"""simple docstring"""
A = 2
while True:
if is_prime(__a ):
yield num
num += 1
def __a ( UpperCAmelCase = 2000000 ) ->Dict:
"""simple docstring"""
return sum(takewhile(lambda UpperCAmelCase : x < n , prime_generator() ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 350 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __a ( ) ->str:
"""simple docstring"""
A = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=UpperCAmelCase , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=UpperCAmelCase , default=5 )
parser.add_argument("""--batch_size""" , type=UpperCAmelCase , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=UpperCAmelCase , default=1 )
parser.add_argument("""--freeze""" , type=UpperCAmelCase , default=UpperCAmelCase )
parser.add_argument("""--learning_rate""" , type=UpperCAmelCase , default=5E-4 )
parser.add_argument("""--seed""" , type=UpperCAmelCase , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=UpperCAmelCase , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=UpperCAmelCase , default=10 )
parser.add_argument("""--weight_decay""" , type=UpperCAmelCase , default=0.01 )
parser.add_argument("""--output_dir""" , type=UpperCAmelCase , default="""./results""" )
return parser.parse_args()
_lowerCamelCase : Optional[Any] = load('accuracy')
def __a ( UpperCAmelCase ) ->Any:
"""simple docstring"""
A , A = eval_pred
A = np.argmax(UpperCAmelCase , axis=1 )
return metric.compute(predictions=UpperCAmelCase , references=UpperCAmelCase )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Any ):
super().__init__()
A = trainer
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , **_lowerCAmelCase : List[Any] ):
if control.should_evaluate:
A = deepcopy(_lowerCAmelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def __a ( ) ->Optional[int]:
"""simple docstring"""
A = get_args()
set_seed(args.seed )
A = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
A = dataset.train_test_split(test_size=0.2 )
A = train_test["""test"""].train_test_split(test_size=0.5 )
A = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
A = AutoTokenizer.from_pretrained(args.model_ckpt )
A = tokenizer.eos_token
A = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
A = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
A = False
A = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(UpperCAmelCase ):
A = tokenizer(example["""src"""] , truncation=UpperCAmelCase , max_length=1024 )
A = labels.straint(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
A = train_test_validation.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=train_test_validation["""train"""].column_names , )
A = DataCollatorWithPadding(tokenizer=UpperCAmelCase )
A = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
A = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=UpperCAmelCase , data_collator=UpperCAmelCase , compute_metrics=UpperCAmelCase , )
print("""Training...""" )
trainer.add_callback(CustomCallback(UpperCAmelCase ) )
trainer.train()
if __name__ == "__main__":
main()
| 337 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {"""vocab_file""": """sentencepiece.model"""}
_lowerCamelCase : Tuple = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
}
_lowerCamelCase : Optional[Any] = {
"""google/rembert""": 256,
}
class __UpperCAmelCase ( lowercase__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[Any]="[CLS]" , _lowerCAmelCase : Optional[Any]="[SEP]" , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Union[str, Any]="[SEP]" , _lowerCAmelCase : int="[PAD]" , _lowerCAmelCase : str="[CLS]" , _lowerCAmelCase : Tuple="[MASK]" , **_lowerCAmelCase : List[str] , ):
super().__init__(
do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(lowercase_ )
@property
def A (self : Union[str, Any] ):
return len(self.sp_model )
def A (self : int ):
A = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : Dict ):
A = self.__dict__.copy()
A = None
return state
def __setstate__(self : List[Any] , _lowerCAmelCase : Any ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def A (self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Dict=False ):
A = self.sp_model.EncodeAsPieces(lowercase_ )
return pieces
def A (self : str , _lowerCAmelCase : int ):
return self.sp_model.PieceToId(lowercase_ )
def A (self : Tuple , _lowerCAmelCase : Any ):
return self.sp_model.IdToPiece(lowercase_ )
def A (self : List[str] , _lowerCAmelCase : List[Any] ):
A = self.sp_model.decode_pieces(lowercase_ )
return out_string
def A (self : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A (self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Any] = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1]
def A (self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A (self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any = None ):
if not os.path.isdir(lowercase_ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(lowercase_ ) )
return
A = os.path.join(
lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
| 351 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
_lowerCamelCase : Dict = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
_lowerCamelCase : Optional[Any] = {
'ctrl': 256,
}
_lowerCamelCase : List[str] = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
A = set(UpperCAmelCase )
return pairs
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = CONTROL_CODES
def __init__(self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any]="<unk>" , **_lowerCAmelCase : Dict ):
super().__init__(unk_token=_lowerCAmelCase , **_lowerCAmelCase )
with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
A = json.load(_lowerCAmelCase )
A = {v: k for k, v in self.encoder.items()}
with open(_lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
A = merges_handle.read().split("""\n""" )[1:-1]
A = [tuple(merge.split() ) for merge in merges]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = {}
@property
def A (self : Tuple ):
return len(self.encoder )
def A (self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def A (self : Optional[int] , _lowerCAmelCase : Optional[int] ):
if token in self.cache:
return self.cache[token]
A = tuple(_lowerCAmelCase )
A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
A = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(_lowerCAmelCase ):
try:
A = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(_lowerCAmelCase )
A = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
A = get_pairs(_lowerCAmelCase )
A = """@@ """.join(_lowerCAmelCase )
A = word[:-4]
A = word
return word
def A (self : List[str] , _lowerCAmelCase : Dict ):
A = []
A = re.findall(r"""\S+\n?""" , _lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def A (self : str , _lowerCAmelCase : int ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def A (self : Dict , _lowerCAmelCase : str ):
return self.decoder.get(_lowerCAmelCase , self.unk_token )
def A (self : List[str] , _lowerCAmelCase : List[Any] ):
A = """ """.join(_lowerCAmelCase ).replace("""@@ """ , """""" ).strip()
return out_string
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" )
A = 0
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
A = token_index
writer.write(""" """.join(_lowerCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 337 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __a ( ) ->tuple[list[int], int]:
"""simple docstring"""
A = [randint(-1000 , 1000 ) for i in range(10 )]
A = randint(-5000 , 5000 )
return (arr, r)
_lowerCamelCase : Any = make_dataset()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(_snake_case , 3 ):
if sum(_snake_case ) == target:
return tuple(sorted(_snake_case ) )
return (0, 0, 0)
def __a ( UpperCAmelCase , UpperCAmelCase ) ->tuple[int, int, int]:
"""simple docstring"""
arr.sort()
A = len(_snake_case )
for i in range(n - 1 ):
A = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __a ( ) ->tuple[float, float]:
"""simple docstring"""
A = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
A = '''
triplet_sum1(*dataset)
'''
A = '''
triplet_sum2(*dataset)
'''
A = repeat(setup=_snake_case , stmt=_snake_case , repeat=5 , number=10000 )
A = repeat(setup=_snake_case , stmt=_snake_case , repeat=5 , number=10000 )
return (min(_snake_case ), min(_snake_case ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCamelCase : Union[str, Any] = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
| 352 |
'''simple docstring'''
_lowerCamelCase : List[Any] = 'Input must be a string of 8 numbers plus letter'
_lowerCamelCase : str = 'TRWAGMYFPDXBNJZSQVHLCKE'
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
A = f"""Expected string as input, found {type(UpperCAmelCase ).__name__}"""
raise TypeError(UpperCAmelCase )
A = spanish_id.replace("""-""" , """""" ).upper()
if len(UpperCAmelCase ) != 9:
raise ValueError(UpperCAmelCase )
try:
A = int(spanish_id_clean[0:8] )
A = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(UpperCAmelCase ) from ex
if letter.isdigit():
raise ValueError(UpperCAmelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
_lowerCamelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = NllbTokenizer
__lowerCAmelCase = []
__lowerCAmelCase = []
def __init__(self : Optional[int] , _lowerCAmelCase : int=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Optional[Any]="<s>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : List[Any]="</s>" , _lowerCAmelCase : Tuple="<s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]="<pad>" , _lowerCAmelCase : Optional[int]="<mask>" , _lowerCAmelCase : str=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[Any]=False , **_lowerCAmelCase : Optional[Any] , ):
A = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
A = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
A = vocab_file
A = False if not self.vocab_file else True
A = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
A = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A = src_lang if src_lang is not None else 'eng_Latn'
A = self.convert_tokens_to_ids(self._src_lang )
A = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A (self : Union[str, Any] ):
return self._src_lang
@src_lang.setter
def A (self : Optional[Any] , _lowerCAmelCase : Dict ):
A = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A (self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A (self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : str = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A (self : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Any ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
A = src_lang
A = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
A = self.convert_tokens_to_ids(snake_case__ )
A = tgt_lang_id
return inputs
def A (self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict = "eng_Latn" , _lowerCAmelCase : List[Any] = None , _lowerCAmelCase : Optional[int] = "fra_Latn" , **_lowerCAmelCase : Optional[int] , ):
A = src_lang
A = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def A (self : Any ):
return self.set_src_lang_special_tokens(self.src_lang )
def A (self : str ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A (self : Optional[Any] , _lowerCAmelCase : Any ):
A = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
A = []
A = [self.eos_token_id, self.cur_lang_code]
else:
A = [self.cur_lang_code]
A = [self.eos_token_id]
A = self.convert_ids_to_tokens(self.prefix_tokens )
A = self.convert_ids_to_tokens(self.suffix_tokens )
A = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A (self : str , _lowerCAmelCase : List[str] ):
A = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
A = []
A = [self.eos_token_id, self.cur_lang_code]
else:
A = [self.cur_lang_code]
A = [self.eos_token_id]
A = self.convert_ids_to_tokens(self.prefix_tokens )
A = self.convert_ids_to_tokens(self.suffix_tokens )
A = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A (self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
A = os.path.join(
snake_case__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 353 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''umt5'''
__lowerCAmelCase = ['''past_key_values''']
def __init__(self : Dict , _lowerCAmelCase : Optional[int]=25_0112 , _lowerCAmelCase : int=512 , _lowerCAmelCase : Any=64 , _lowerCAmelCase : int=1024 , _lowerCAmelCase : int=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]=6 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : Any=128 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=1e-6 , _lowerCAmelCase : Dict=1.0 , _lowerCAmelCase : Tuple="gated-gelu" , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[int]="T5Tokenizer" , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : str=1 , _lowerCAmelCase : Union[str, Any]=0 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(
is_encoder_decoder=_lowerCAmelCase , tokenizer_class=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
A = vocab_size
A = d_model
A = d_kv
A = d_ff
A = num_layers
A = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A = num_heads
A = relative_attention_num_buckets
A = relative_attention_max_distance
A = dropout_rate
A = layer_norm_epsilon
A = initializer_factor
A = feed_forward_proj
A = use_cache
A = self.feed_forward_proj.split("""-""" )
A = act_info[-1]
A = act_info[0] == """gated"""
if len(_lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
A = """gelu_new"""
@property
def A (self : Optional[Any] ):
return self.d_model
@property
def A (self : List[Any] ):
return self.num_heads
@property
def A (self : Dict ):
return self.num_layers
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def A (self : Optional[Any] ):
A = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
A = """past_encoder_sequence + sequence"""
A = {0: """batch"""}
A = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A = {0: """batch""", 1: """decoder_sequence"""}
A = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def A (self : Union[str, Any] ):
return 13
@property
def A (self : Tuple ):
return 5e-4
| 337 | 0 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@slow
@require_torch
def A (self : Any ):
A = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
A = BertTokenizer.from_pretrained("""bert-base-uncased""" )
A = bertabert.config.encoder.vocab_size
A = tokenizer.sep_token_id
A = tokenizer.cls_token_id
A = 128
A = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
A = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
A = train_dataset.select(range(32 ) )
A = val_dataset.select(range(16 ) )
A = 4
def _map_to_encoder_decoder_inputs(_lowerCAmelCase : Optional[int] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
A = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=__UpperCAmelCase , max_length=512 )
A = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=__UpperCAmelCase , max_length=128 )
A = inputs.input_ids
A = inputs.attention_mask
A = outputs.input_ids
A = outputs.input_ids.copy()
A = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
A = outputs.attention_mask
assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_lowerCAmelCase : int ):
A = pred.label_ids
A = pred.predictions
# all unnecessary tokens are removed
A = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
A = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
A = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
A = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
A = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
A = self.get_auto_remove_tmp_dir()
A = SeqaSeqTrainingArguments(
output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy="""steps""" , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
A = SeqaSeqTrainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , )
# start training
trainer.train()
| 354 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''yolos'''
def __init__(self : Tuple , _lowerCAmelCase : List[Any]=768 , _lowerCAmelCase : str=12 , _lowerCAmelCase : Tuple=12 , _lowerCAmelCase : Optional[int]=3072 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Optional[Any]=1e-12 , _lowerCAmelCase : Optional[Any]=[512, 864] , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[int]=100 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Any=0.1 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def A (self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A (self : Any ):
return 1e-4
@property
def A (self : int ):
return 12
| 337 | 0 |
from typing import Any
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : Any ):
A = data
A = None
def __repr__(self : List[Any] ):
return F"""Node({self.data})"""
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : List[Any] ):
A = None
def __iter__(self : Dict ):
A = self.head
while node:
yield node.data
A = node.next
def __len__(self : List[str] ):
return sum(1 for _ in self )
def __repr__(self : str ):
return "->".join([str(SCREAMING_SNAKE_CASE_ ) for item in self] )
def __getitem__(self : Dict , _lowerCAmelCase : List[Any] ):
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__(self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
A = self.head
for _ in range(SCREAMING_SNAKE_CASE_ ):
A = current.next
A = data
def A (self : List[Any] , _lowerCAmelCase : Any ):
self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE_ )
def A (self : Optional[Any] , _lowerCAmelCase : int ):
self.insert_nth(0 , SCREAMING_SNAKE_CASE_ )
def A (self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
if not 0 <= index <= len(self ):
raise IndexError("""list index out of range""" )
A = Node(SCREAMING_SNAKE_CASE_ )
if self.head is None:
A = new_node
elif index == 0:
A = self.head # link new_node to head
A = new_node
else:
A = self.head
for _ in range(index - 1 ):
A = temp.next
A = temp.next
A = new_node
def A (self : List[str] ): # print every node data
print(self )
def A (self : Optional[int] ):
return self.delete_nth(0 )
def A (self : Tuple ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def A (self : Tuple , _lowerCAmelCase : Dict = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("""List index out of range.""" )
A = self.head # default first node
if index == 0:
A = self.head.next
else:
A = self.head
for _ in range(index - 1 ):
A = temp.next
A = temp.next
A = temp.next.next
return delete_node.data
def A (self : Tuple ):
return self.head is None
def A (self : Optional[int] ):
A = None
A = self.head
while current:
# Store the current node's next node.
A = current.next
# Make the current node's next point backwards
A = prev
# Make the previous node be the current node
A = current
# Make the current node the next node (to progress iteration)
A = next_node
# Return prev in order to put the head at the end
A = prev
def __a ( ) ->List[str]:
"""simple docstring"""
A = LinkedList()
assert linked_list.is_empty() is True
assert str(snake_case_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(snake_case_ ) == i
linked_list.insert_nth(snake_case_ , i + 1 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(snake_case_ ) == 9
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
A = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(-8 , 1 ) )
def __a ( ) ->Tuple:
"""simple docstring"""
A = [
-9,
100,
Node(77345112 ),
"""dlrow olleH""",
7,
5555,
0,
-192.55_555,
"""Hello, world!""",
77.9,
Node(10 ),
None,
None,
12.20,
]
A = LinkedList()
for i in test_input:
linked_list.insert_tail(snake_case_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(snake_case_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
A = linked_list.delete_head()
assert result == -9
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
A = linked_list.delete_tail()
assert result == 12.2
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
A = linked_list.delete_nth(10 )
assert result is None
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("""Hello again, world!""" ) )
assert (
str(snake_case_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(snake_case_ )
assert (
str(snake_case_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(snake_case_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __a ( ) ->List[str]:
"""simple docstring"""
from doctest import testmod
testmod()
A = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(snake_case_ )
print("""\nReading/changing Node data using indexing:""" )
print(f"""Element at Position 1: {linked_list[1]}""" )
A = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(snake_case_ )
print(f"""length of linked_list is : {len(snake_case_ )}""" )
if __name__ == "__main__":
main()
| 355 |
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase ) ->list[int]:
"""simple docstring"""
return [ord(UpperCAmelCase ) - 96 for elem in plain]
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def __a ( ) ->None:
"""simple docstring"""
A = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , UpperCAmelCase )
print("""Decoded:""" , decode(UpperCAmelCase ) )
if __name__ == "__main__":
main()
| 337 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __UpperCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
def __init__(self : str , *_lowerCAmelCase : str , **_lowerCAmelCase : int ):
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 356 |
'''simple docstring'''
import os
def __a ( ) ->List[Any]:
"""simple docstring"""
A = os.path.join(os.path.dirname(UpperCAmelCase ) , """num.txt""" )
with open(UpperCAmelCase ) as file_hand:
return str(sum(int(UpperCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 337 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : str = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __UpperCAmelCase ( __lowerCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 'yolos'
def __init__(self : Dict , _lowerCAmelCase : List[Any]=768 , _lowerCAmelCase : Tuple=12 , _lowerCAmelCase : int=12 , _lowerCAmelCase : int=3072 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Dict=1e-12 , _lowerCAmelCase : List[Any]=[512, 864] , _lowerCAmelCase : Optional[int]=16 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : List[str]=100 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Any=5 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Any=0.1 , **_lowerCAmelCase : Any , ):
super().__init__(**UpperCamelCase_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class __UpperCAmelCase ( __lowerCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def A (self : Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A (self : Dict ):
return 1e-4
@property
def A (self : Dict ):
return 12
| 357 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __a ( UpperCAmelCase ) ->List[int]:
"""simple docstring"""
if isinstance(UpperCAmelCase , np.ndarray ):
return list(tensor.shape )
A = tf.shape(UpperCAmelCase )
if tensor.shape == tf.TensorShape(UpperCAmelCase ):
return dynamic
A = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase )]
def __a ( UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase , name=UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase=-1 ) ->str:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
A , A = tf.nn.moments(UpperCAmelCase , axes=[axis] , keepdims=UpperCAmelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
A = [1] * inputs.shape.rank
A = shape_list(UpperCAmelCase )[axis]
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
# Compute layer normalization using the batch_normalization
# function.
A = tf.nn.batch_normalization(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , offset=UpperCAmelCase , scale=UpperCAmelCase , variance_epsilon=UpperCAmelCase , )
return outputs
def __a ( UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=-1 ) ->int:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
A = tf.shape(UpperCAmelCase )
A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
def __a ( UpperCAmelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(UpperCAmelCase , tf.Tensor ):
A = tf.convert_to_tensor(UpperCAmelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
A = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
A = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
UpperCAmelCase , tf.cast(UpperCAmelCase , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
A = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
A = [x for x in data if len(UpperCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
A = np.asarray(UpperCAmelCase )
A = 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCAmelCase ):
A = chunk_data
else:
A = data
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if name in group.attrs:
A = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs[name]]
else:
A = []
A = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
def _expand_single_ad_tensor(UpperCAmelCase ):
if isinstance(UpperCAmelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCAmelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase )
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Optional[int] , _lowerCAmelCase : Union[str, Any] , ):
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = """gelu"""
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def A (self : List[str] ):
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A = ids_tensor([self.batch_size] , self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A (self : Dict ):
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A (self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
A = TFEsmModel(config=__lowerCAmelCase )
A = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A = model(__lowerCAmelCase )
A = [input_ids, input_mask]
A = model(__lowerCAmelCase )
A = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A (self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : int , ):
A = True
A = TFEsmModel(config=__lowerCAmelCase )
A = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
A = model(__lowerCAmelCase )
A = [input_ids, input_mask]
A = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase )
# Also check the case where encoder outputs are not passed
A = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A (self : int , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
A = TFEsmForMaskedLM(config=__lowerCAmelCase )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A (self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
A = self.num_labels
A = TFEsmForTokenClassification(config=__lowerCAmelCase )
A = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A (self : List[str] ):
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
__lowerCAmelCase = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : Tuple ):
A = TFEsmModelTester(self )
A = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def A (self : int ):
self.config_tester.run_common_tests()
def A (self : List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A (self : Tuple ):
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )
def A (self : Optional[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def A (self : Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def A (self : Optional[int] ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def A (self : List[Any] ):
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def A (self : str ):
pass
def A (self : Dict ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__lowerCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for k, v in name.items():
assert isinstance(__lowerCAmelCase , tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : Union[str, Any] ):
A = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(__lowerCAmelCase )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def A (self : List[str] ):
A = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(__lowerCAmelCase )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 358 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_lowerCamelCase : Any = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
A = primes[group]["""prime"""]
A = primes[group]["""generator"""]
A = int(hexlify(urandom(32 ) ) , base=16 )
def A (self : Optional[Any] ):
return hex(self.__private_key )[2:]
def A (self : Union[str, Any] ):
A = pow(self.generator , self.__private_key , self.prime )
return hex(_lowerCAmelCase )[2:]
def A (self : Any , _lowerCAmelCase : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(_lowerCAmelCase , (self.prime - 1) // 2 , self.prime ) == 1
)
def A (self : List[str] , _lowerCAmelCase : str ):
A = int(_lowerCAmelCase , base=16 )
if not self.is_valid_public_key(_lowerCAmelCase ):
raise ValueError("""Invalid public key""" )
A = pow(_lowerCAmelCase , self.__private_key , self.prime )
return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest()
@staticmethod
def A (_lowerCAmelCase : int , _lowerCAmelCase : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(_lowerCAmelCase , (prime - 1) // 2 , _lowerCAmelCase ) == 1
)
@staticmethod
def A (_lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : int = 14 ):
A = int(_lowerCAmelCase , base=16 )
A = int(_lowerCAmelCase , base=16 )
A = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("""Invalid public key""" )
A = pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCAmelCase = 'yolos'
def __init__(self : List[str] , _lowerCAmelCase : Optional[int]=768 , _lowerCAmelCase : Optional[int]=12 , _lowerCAmelCase : Any=12 , _lowerCAmelCase : List[str]=3072 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Optional[int]=1e-12 , _lowerCAmelCase : List[Any]=[512, 864] , _lowerCAmelCase : Any=16 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=100 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : int=5 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Dict=0.1 , **_lowerCAmelCase : Dict , ):
super().__init__(**a_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def A (self : str ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A (self : Tuple ):
return 1e-4
@property
def A (self : Optional[int] ):
return 12
| 359 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
else:
return a * actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(UpperCAmelCase , UpperCAmelCase )
return actual_power(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Dict , _lowerCAmelCase : List[Any] , ):
A = parent
A = 13
A = 7
A = 30
A = self.seq_length + self.mem_len
A = 15
A = True
A = True
A = 99
A = [10, 50, 80]
A = 32
A = 32
A = 4
A = 8
A = 128
A = 2
A = 2
A = None
A = 1
A = 0
A = 3
A = self.vocab_size - 1
A = 0.01
def A (self : Any ):
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def A (self : str ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def A (self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] ):
A = TFTransfoXLModel(__lowerCAmelCase )
A , A = model(__lowerCAmelCase ).to_tuple()
A = {"""input_ids""": input_ids_a, """mems""": mems_a}
A , A = model(__lowerCAmelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def A (self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] ):
A = TFTransfoXLLMHeadModel(__lowerCAmelCase )
A , A = model(__lowerCAmelCase ).to_tuple()
A = {"""input_ids""": input_ids_a, """labels""": lm_labels}
A , A = model(__lowerCAmelCase ).to_tuple()
A , A = model([input_ids_a, mems_a] ).to_tuple()
A = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
A , A = model(__lowerCAmelCase ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def A (self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
A = TFTransfoXLForSequenceClassification(__lowerCAmelCase )
A = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A (self : Union[str, Any] ):
A = self.prepare_config_and_inputs()
((A) , (A) , (A) , (A)) = config_and_inputs
A = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__lowerCAmelCase = () if is_tf_available() else ()
__lowerCAmelCase = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def A (self : Dict ):
A = TFTransfoXLModelTester(self )
A = ConfigTester(self , config_class=__lowerCAmelCase , d_embed=37 )
def A (self : List[Any] ):
self.config_tester.run_common_tests()
def A (self : int ):
self.model_tester.set_seed()
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__lowerCAmelCase )
def A (self : int ):
self.model_tester.set_seed()
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__lowerCAmelCase )
def A (self : Any ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__lowerCAmelCase )
def A (self : Tuple ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
A = model_class(__lowerCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
A = model.get_output_embeddings()
assert isinstance(__lowerCAmelCase , tf.keras.layers.Layer )
A = model.get_bias()
assert name is None
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
def A (self : Dict ):
pass
@slow
def A (self : Union[str, Any] ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFTransfoXLModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def A (self : List[Any] ):
pass
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def A (self : List[str] ):
A = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
A = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
A = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
A = model.generate(__lowerCAmelCase , max_length=200 , do_sample=__lowerCAmelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , __lowerCAmelCase )
| 360 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
if isinstance(UpperCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __UpperCAmelCase :
'''simple docstring'''
def A (self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
pass
def A (self : List[str] ):
pass
def A (self : Union[str, Any] ):
pass
def A (self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ):
A = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = {"""vision_model""": vision_model, """text_model""": text_model}
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = after_output[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def A (self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ):
A = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def A (self : List[str] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def A (self : Optional[int] ):
A = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def A (self : List[Any] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def A (self : Tuple ):
A , A = self.get_pretrained_model_and_inputs()
A = model_a(**_lowerCAmelCase )
A = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model_a(**_lowerCAmelCase )
A = after_outputs[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : int ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
A = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Union[str, Any] ):
A = TFViTModelTester(self )
A = TFBertModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : str ):
A = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : str ):
A = TFDeiTModelTester(self )
A = TFRobertaModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Dict ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Optional[Any] ):
A = TFCLIPVisionModelTester(self )
A = TFBertModelTester(self )
A = clip_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : Any ):
A = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
A = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
A = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 337 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_lowerCamelCase : Dict = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_lowerCamelCase : List[str] = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_lowerCamelCase : str = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
return float((preds == labels).mean() )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = simple_accuracy(a__ , a__ )
A = float(fa_score(y_true=a__ , y_pred=a__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
A = float(pearsonr(a__ , a__ )[0] )
A = float(spearmanr(a__ , a__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def A (self : Dict ) -> List[Any]:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def A (self : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> str:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_snake_case , _snake_case )}
elif self.config_name == "stsb":
return pearson_and_spearman(_snake_case , _snake_case )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_snake_case , _snake_case )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_snake_case , _snake_case )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 361 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Any = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : List[str] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_lowerCamelCase : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_lowerCamelCase : Dict = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCamelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCamelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCamelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCamelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_MAPPING
_lowerCamelCase : Optional[Any] = auto_class_update(FlaxAutoModel)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCamelCase : str = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowerCamelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 337 | 0 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
_lowerCamelCase : str = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def __a ( ) ->List[Any]:
"""simple docstring"""
A = os.path.dirname(os.path.realpath(lowercase_ ) )
A = os.path.join(lowercase_ , """words.txt""" )
A = """"""
with open(lowercase_ ) as f:
A = f.readline()
A = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
A = [
word
for word in [sum(ord(lowercase_ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase_ )
if __name__ == "__main__":
print(solution())
| 362 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def A (self : Any ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def A (self : List[str] ):
pass
@slow
@require_torch
def A (self : int ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
A = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
A = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def A (self : Tuple ):
pass
| 337 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : str = logging.get_logger(__name__)
class __UpperCAmelCase ( A__ , A__ ):
'''simple docstring'''
__lowerCAmelCase = '''maskformer-swin'''
__lowerCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__(self : Optional[int] , _lowerCAmelCase : List[Any]=224 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : List[str]=96 , _lowerCAmelCase : Any=[2, 2, 6, 2] , _lowerCAmelCase : Tuple=[3, 6, 12, 24] , _lowerCAmelCase : Union[str, Any]=7 , _lowerCAmelCase : int=4.0 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[str]=0.0 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Union[str, Any]=1e-5 , _lowerCAmelCase : str=None , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int , ):
super().__init__(**_a )
A = image_size
A = patch_size
A = num_channels
A = embed_dim
A = depths
A = len(_a )
A = num_heads
A = window_size
A = mlp_ratio
A = qkv_bias
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = drop_path_rate
A = hidden_act
A = use_absolute_embeddings
A = layer_norm_eps
A = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A = int(embed_dim * 2 ** (len(_a ) - 1) )
A = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_a ) + 1 )]
A , A = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
| 363 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCamelCase : Dict = 'src/diffusers'
_lowerCamelCase : Dict = '.'
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCamelCase : List[str] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCamelCase : Tuple = spec.loader.load_module()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCAmelCase ) is not None
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = object_name.split(""".""" )
A = 0
# First let's find the module where our object lives.
A = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , f"""{module}.py""" ) ):
i += 1
if i < len(UpperCAmelCase ):
A = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(UpperCAmelCase , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
# Now let's find the class / func in the code!
A = """"""
A = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
return "".join(UpperCAmelCase )
_lowerCamelCase : str = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_lowerCamelCase : Any = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_lowerCamelCase : str = re.compile(R'<FILL\s+[^>]*>')
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = code.split("""\n""" )
A = 0
while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = len(get_indent(UpperCAmelCase ) ) > 0
if has_indent:
A = f"""class Bla:\n{code}"""
A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase )
A = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
A , A = style_docstrings_in_code(UpperCAmelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
with open(UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
A = []
A = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase ):
A = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A = search.groups()
A = find_code_in_diffusers(UpperCAmelCase )
A = get_indent(UpperCAmelCase )
A = line_index + 1 if indent == theoretical_indent else line_index + 2
A = theoretical_indent
A = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A = True
while line_index < len(UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase ):
break
A = lines[line_index]
A = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(f"""^{indent}# End copy""" , UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
A = """""".join(UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
A = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase ) is None]
A = """\n""".join(UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase ) > 0:
A = replace_pattern.replace("""with""" , """""" ).split(""",""" )
A = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A = pattern.groups()
A = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if option.strip() == "all-casing":
A = re.sub(obja.lower() , obja.lower() , UpperCAmelCase )
A = re.sub(obja.upper() , obja.upper() , UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A = blackify(lines[start_index - 1] + theoretical_code )
A = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A = lines[:start_index] + [theoretical_code] + lines[line_index:]
A = start_index + 1
if overwrite and len(UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase )
return diffs
def __a ( UpperCAmelCase = False ) ->int:
"""simple docstring"""
A = glob.glob(os.path.join(UpperCAmelCase , """**/*.py""" ) , recursive=UpperCAmelCase )
A = []
for filename in all_files:
A = is_copy_consistent(UpperCAmelCase , UpperCAmelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(UpperCAmelCase ) > 0:
A = """\n""".join(UpperCAmelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCamelCase : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 337 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TextToVideoSDPipeline
__lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
__lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__lowerCAmelCase = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def A (self : Optional[int] ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A = CLIPTextModel(_SCREAMING_SNAKE_CASE )
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A (self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
A = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
A = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A (self : Tuple ):
A = "cpu" # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = TextToVideoSDPipeline(**_SCREAMING_SNAKE_CASE )
A = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A = "np"
A = sd_pipe(**_SCREAMING_SNAKE_CASE ).frames
A = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A (self : int ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A (self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def A (self : Optional[Any] ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def A (self : int ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def A (self : Tuple ):
pass
def A (self : Any ):
return super().test_progress_bar()
@slow
@skip_mps
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : List[str] ):
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A = pipe.to("""cuda""" )
A = "Spiderman is surfing"
A = torch.Generator(device="""cpu""" ).manual_seed(0 )
A = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="""pt""" ).frames
A = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A (self : List[Any] ):
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A = pipe.to("""cuda""" )
A = "Spiderman is surfing"
A = torch.Generator(device="""cpu""" ).manual_seed(0 )
A = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""pt""" ).frames
A = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 364 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = credit_card_number
A = 0
A = len(UpperCAmelCase ) - 2
for i in range(UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
A = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
A = cc_number[:i] + str(UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = f"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(f"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(UpperCAmelCase ) <= 16:
print(f"""{error_message} of its length.""" )
return False
if not validate_initial_digits(UpperCAmelCase ):
print(f"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(UpperCAmelCase ):
print(f"""{error_message} it fails the Luhn check.""" )
return False
print(f"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 337 | 0 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(_UpperCAmelCase , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
A = _distribute_shards(**_UpperCAmelCase )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
A = _split_gen_kwargs(_UpperCAmelCase , _UpperCAmelCase )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if expected is RuntimeError:
with pytest.raises(_UpperCAmelCase ):
_number_of_shards_in_gen_kwargs(_UpperCAmelCase )
else:
A = _number_of_shards_in_gen_kwargs(_UpperCAmelCase )
assert out == expected
| 365 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : List[Any] ):
A = str(id_ )
A = None
A = None
A = []
A = {} # {vertex:distance}
def __lt__(self : List[Any] , _lowerCAmelCase : Tuple ):
return self.key < other.key
def __repr__(self : str ):
return self.id
def A (self : Union[str, Any] , _lowerCAmelCase : List[str] ):
self.neighbors.append(_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
A = weight
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , UpperCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->list:
"""simple docstring"""
A = []
for u in graph:
A = math.inf
A = None
A = 0
A = graph[:]
while q:
A = min(UpperCAmelCase )
q.remove(UpperCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
for i in range(1 , len(UpperCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Iterator[tuple]:
"""simple docstring"""
for u in graph:
A = math.inf
A = None
A = 0
A = list(UpperCAmelCase )
hq.heapify(UpperCAmelCase )
while h:
A = hq.heappop(UpperCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
hq.heapify(UpperCAmelCase )
for i in range(1 , len(UpperCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __a ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
A = len(SCREAMING_SNAKE_CASE_ )
while cur > 1:
# Find the maximum number in arr
A = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
A = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE_ )]
# Reverse whole list
A = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE_ )]
cur -= 1
return arr
if __name__ == "__main__":
_lowerCamelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 366 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''perceiver'''
def __init__(self : Dict , _lowerCAmelCase : List[str]=256 , _lowerCAmelCase : Any=1280 , _lowerCAmelCase : Dict=768 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Optional[int]=26 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]="kv" , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Any=1e-12 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : int=262 , _lowerCAmelCase : int=2048 , _lowerCAmelCase : int=56 , _lowerCAmelCase : List[Any]=[368, 496] , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : Any=1920 , _lowerCAmelCase : Optional[int]=16 , _lowerCAmelCase : List[Any]=[1, 16, 224, 224] , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
A = num_latents
A = d_latents
A = d_model
A = num_blocks
A = num_self_attends_per_block
A = num_self_attention_heads
A = num_cross_attention_heads
A = qk_channels
A = v_channels
A = cross_attention_shape_for_attention
A = self_attention_widening_factor
A = cross_attention_widening_factor
A = hidden_act
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = use_query_residual
# masked language modeling attributes
A = vocab_size
A = max_position_embeddings
# image classification attributes
A = image_size
# flow attributes
A = train_size
# multimodal autoencoding attributes
A = num_frames
A = audio_samples_per_frame
A = samples_per_patch
A = output_shape
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
def A (self : List[str] ):
if self.task == "multiple-choice":
A = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def A (self : Dict ):
return 1e-4
def A (self : List[Any] , _lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = preprocessor.num_special_tokens_to_add(_lowerCAmelCase )
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
A = [""" """.join(["""a"""] ) * seq_length] * batch_size
A = dict(preprocessor(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""input_ids""" )
return inputs
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
A = self._generate_dummy_images(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A = dict(preprocessor(images=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 337 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Dict = '▁'
_lowerCamelCase : int = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
_lowerCamelCase : List[Any] = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
_lowerCamelCase : Any = {'vinai/bartpho-syllable': 1024}
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__(self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int="<s>" , _lowerCAmelCase : List[str]="</s>" , _lowerCAmelCase : Tuple="</s>" , _lowerCAmelCase : Any="<s>" , _lowerCAmelCase : Optional[int]="<unk>" , _lowerCAmelCase : List[str]="<pad>" , _lowerCAmelCase : List[Any]="<mask>" , _lowerCAmelCase : int = None , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
A = vocab_file
A = monolingual_vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
A = {}
A = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
A = cnt
cnt += 1
with open(__lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
A = line.strip().split()[0]
A = len(self.fairseq_tokens_to_ids )
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
A = len(self.fairseq_tokens_to_ids )
A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Dict ):
A = self.__dict__.copy()
A = None
A = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : List[str] , _lowerCAmelCase : Optional[int] ):
A = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A (self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple = None , _lowerCAmelCase : Any = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def A (self : Any , _lowerCAmelCase : str , _lowerCAmelCase : int = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A (self : str ):
return len(self.fairseq_ids_to_tokens )
def A (self : List[str] ):
A = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A (self : Dict , _lowerCAmelCase : int ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def A (self : int , _lowerCAmelCase : List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] ):
return self.fairseq_ids_to_tokens[index]
def A (self : int , _lowerCAmelCase : Optional[int] ):
A = """""".join(__lowerCamelCase ).replace(__lowerCamelCase , """ """ ).strip()
return out_string
def A (self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , """wb""" ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(__lowerCamelCase )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 367 |
'''simple docstring'''
import math
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
A = n
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # adjacency matrix for weight
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
A = w
def A (self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def A (self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Dict , _lowerCAmelCase : Union[str, Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_lowerCamelCase )
def A (self : Optional[int] ):
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A (self : int ):
A = """sgugger/tiny-distilbert-classification"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , only_pretrain_model=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A (self : Any ):
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , torchscript=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def A (self : Union[str, Any] ):
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , fpaa=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A (self : Union[str, Any] ):
A = """sshleifer/tiny-gpt2"""
A = AutoConfig.from_pretrained(_lowerCamelCase )
# set architectures equal to `None`
A = None
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A (self : str ):
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can\'t do half precision""" )
def A (self : int ):
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowerCamelCase , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A (self : List[str] ):
A = """sshleifer/tiny-gpt2"""
A = AutoConfig.from_pretrained(_lowerCamelCase )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A (self : List[str] ):
A = """sshleifer/tinier_bart"""
A = AutoConfig.from_pretrained(_lowerCamelCase )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A (self : Dict ):
A = """sshleifer/tiny-gpt2"""
A = AutoConfig.from_pretrained(_lowerCamelCase )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A (self : Optional[Any] ):
A = """sshleifer/tinier_bart"""
A = AutoConfig.from_pretrained(_lowerCamelCase )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A (self : Optional[Any] ):
A = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , save_to_csv=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCamelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(_lowerCamelCase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(_lowerCamelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(_lowerCamelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(_lowerCamelCase , """env.csv""" ) , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCamelCase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , """env.csv""" ) ).exists() )
def A (self : Tuple ):
A = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_lowerCAmelCase : Union[str, Any] ):
self.assertTrue(hasattr(_lowerCamelCase , """sequential""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """cumulative""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """current""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowerCamelCase , """log.txt""" ) , log_print=_lowerCamelCase , trace_memory_line_by_line=_lowerCamelCase , multi_process=_lowerCamelCase , )
A = PyTorchBenchmark(_lowerCamelCase )
A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowerCamelCase , """log.txt""" ) ).exists() )
| 368 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : List[str] = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : List[str] = {
'Salesforce/codegen-350M-mono': 2048,
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = CodeGenTokenizer
def __init__(self : int , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[Any]="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Any=False , **_lowerCAmelCase : Optional[int] , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
if kwargs.pop("""add_bos_token""" , _lowerCAmelCase ):
A = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
A = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
A = add_prefix_space
A = pre_tok_class(**_lowerCAmelCase )
A = add_prefix_space
def A (self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def A (self : Tuple , _lowerCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[List[str]] = None , **_lowerCAmelCase : Tuple , ):
A = super().decode(
token_ids=_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase , **_lowerCAmelCase , )
if truncate_before_pattern is not None and len(_lowerCAmelCase ) > 0:
A = self.truncate(_lowerCAmelCase , _lowerCAmelCase )
return decoded_text
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
def find_re(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
A = pattern.search(_lowerCAmelCase , _lowerCAmelCase )
return m.start() if m else -1
A = [re.compile(_lowerCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
A = list(re.finditer("""^print""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: prints[1].start()]
A = list(re.finditer("""^def""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: defs[1].start()]
A = 0
A = [
pos for pos in [find_re(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for terminal in terminals] if pos != -1
]
if len(_lowerCAmelCase ) > 0:
return completion[: min(_lowerCAmelCase )]
else:
return completion
| 337 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''donut-swin'''
__lowerCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__(self : List[str] , _lowerCAmelCase : Union[str, Any]=224 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : str=96 , _lowerCAmelCase : Optional[Any]=[2, 2, 6, 2] , _lowerCAmelCase : Dict=[3, 6, 12, 24] , _lowerCAmelCase : Union[str, Any]=7 , _lowerCAmelCase : List[str]=4.0 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : Any=0.0 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Tuple="gelu" , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : str=1e-5 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_a )
A = image_size
A = patch_size
A = num_channels
A = embed_dim
A = depths
A = len(_a )
A = num_heads
A = window_size
A = mlp_ratio
A = qkv_bias
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = drop_path_rate
A = hidden_act
A = use_absolute_embeddings
A = layer_norm_eps
A = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A = int(embed_dim * 2 ** (len(_a ) - 1) )
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Optional[Any] = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 337 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_lowerCamelCase : List[str] = False
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A (self : Dict ):
A = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A = "A painting of a squirrel eating a burger "
A = torch.manual_seed(0 )
A = pipe(
prompt=_a , generator=_a , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_a )
A = VersatileDiffusionTextToImagePipeline.from_pretrained(_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A = generator.manual_seed(0 )
A = pipe(
prompt=_a , generator=_a , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A (self : Optional[Any] ):
A = VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A = "A painting of a squirrel eating a burger "
A = torch.manual_seed(0 )
A = pipe(
prompt=_a , generator=_a , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 370 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
_lowerCamelCase : List[str] = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_lowerCamelCase : Union[str, Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def __a ( UpperCAmelCase ) ->list[list[int]]:
"""simple docstring"""
A = []
for i in range(len(_lowerCAmelCase ) ):
A = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(_lowerCAmelCase ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(_lowerCAmelCase ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(_lowerCAmelCase ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(_lowerCAmelCase )
return next_generation
def __a ( UpperCAmelCase , UpperCAmelCase ) ->list[Image.Image]:
"""simple docstring"""
A = []
for _ in range(_lowerCAmelCase ):
# Create output image
A = Image.new("""RGB""" , (len(cells[0] ), len(_lowerCAmelCase )) )
A = img.load()
# Save cells to image
for x in range(len(_lowerCAmelCase ) ):
for y in range(len(cells[0] ) ):
A = 255 - cells[y][x] * 255
A = (colour, colour, colour)
# Save image
images.append(_lowerCAmelCase )
A = new_generation(_lowerCAmelCase )
return images
if __name__ == "__main__":
_lowerCamelCase : Any = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 371 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Tuple , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Dict ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Any ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Dict ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 337 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
A = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = emb.weight.shape
A = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
A = emb.weight.data
return lin_layer
def __a ( UpperCAmelCase , UpperCAmelCase="facebook/mbart-large-en-ro" , UpperCAmelCase=False , UpperCAmelCase=False ) ->Optional[Any]:
"""simple docstring"""
A = torch.load(__lowerCAmelCase , map_location="""cpu""" )['''model''']
remove_ignore_keys_(__lowerCAmelCase )
A = state_dict['''encoder.embed_tokens.weight'''].shape[0]
A = MBartConfig.from_pretrained(__lowerCAmelCase , vocab_size=__lowerCAmelCase )
if mbart_aa and finetuned:
A = '''relu'''
A = state_dict['''decoder.embed_tokens.weight''']
A = MBartForConditionalGeneration(__lowerCAmelCase )
model.model.load_state_dict(__lowerCAmelCase )
if finetuned:
A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
_lowerCamelCase : Any = parser.parse_args()
_lowerCamelCase : List[Any] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 350 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __a ( ) ->str:
"""simple docstring"""
A = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=UpperCAmelCase , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=UpperCAmelCase , default=5 )
parser.add_argument("""--batch_size""" , type=UpperCAmelCase , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=UpperCAmelCase , default=1 )
parser.add_argument("""--freeze""" , type=UpperCAmelCase , default=UpperCAmelCase )
parser.add_argument("""--learning_rate""" , type=UpperCAmelCase , default=5E-4 )
parser.add_argument("""--seed""" , type=UpperCAmelCase , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=UpperCAmelCase , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=UpperCAmelCase , default=10 )
parser.add_argument("""--weight_decay""" , type=UpperCAmelCase , default=0.01 )
parser.add_argument("""--output_dir""" , type=UpperCAmelCase , default="""./results""" )
return parser.parse_args()
_lowerCamelCase : Optional[Any] = load('accuracy')
def __a ( UpperCAmelCase ) ->Any:
"""simple docstring"""
A , A = eval_pred
A = np.argmax(UpperCAmelCase , axis=1 )
return metric.compute(predictions=UpperCAmelCase , references=UpperCAmelCase )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Any ):
super().__init__()
A = trainer
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , **_lowerCAmelCase : List[Any] ):
if control.should_evaluate:
A = deepcopy(_lowerCAmelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def __a ( ) ->Optional[int]:
"""simple docstring"""
A = get_args()
set_seed(args.seed )
A = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
A = dataset.train_test_split(test_size=0.2 )
A = train_test["""test"""].train_test_split(test_size=0.5 )
A = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
A = AutoTokenizer.from_pretrained(args.model_ckpt )
A = tokenizer.eos_token
A = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
A = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
A = False
A = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(UpperCAmelCase ):
A = tokenizer(example["""src"""] , truncation=UpperCAmelCase , max_length=1024 )
A = labels.straint(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
A = train_test_validation.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=train_test_validation["""train"""].column_names , )
A = DataCollatorWithPadding(tokenizer=UpperCAmelCase )
A = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
A = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=UpperCAmelCase , data_collator=UpperCAmelCase , compute_metrics=UpperCAmelCase , )
print("""Training...""" )
trainer.add_callback(CustomCallback(UpperCAmelCase ) )
trainer.train()
if __name__ == "__main__":
main()
| 337 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : Any ):
A = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
A = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
A = 'The dog is cute and lives in the garden house'
A = jnp.array([tokenizer.encode(_lowerCAmelCase )] )
A = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
A = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
A = model(_lowerCAmelCase )['last_hidden_state']
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , _lowerCAmelCase , atol=1e-3 ) )
| 351 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
_lowerCamelCase : Dict = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
_lowerCamelCase : Optional[Any] = {
'ctrl': 256,
}
_lowerCamelCase : List[str] = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
A = set(UpperCAmelCase )
return pairs
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = CONTROL_CODES
def __init__(self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any]="<unk>" , **_lowerCAmelCase : Dict ):
super().__init__(unk_token=_lowerCAmelCase , **_lowerCAmelCase )
with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
A = json.load(_lowerCAmelCase )
A = {v: k for k, v in self.encoder.items()}
with open(_lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
A = merges_handle.read().split("""\n""" )[1:-1]
A = [tuple(merge.split() ) for merge in merges]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = {}
@property
def A (self : Tuple ):
return len(self.encoder )
def A (self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def A (self : Optional[int] , _lowerCAmelCase : Optional[int] ):
if token in self.cache:
return self.cache[token]
A = tuple(_lowerCAmelCase )
A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
A = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(_lowerCAmelCase ):
try:
A = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(_lowerCAmelCase )
A = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
A = get_pairs(_lowerCAmelCase )
A = """@@ """.join(_lowerCAmelCase )
A = word[:-4]
A = word
return word
def A (self : List[str] , _lowerCAmelCase : Dict ):
A = []
A = re.findall(r"""\S+\n?""" , _lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def A (self : str , _lowerCAmelCase : int ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def A (self : Dict , _lowerCAmelCase : str ):
return self.decoder.get(_lowerCAmelCase , self.unk_token )
def A (self : List[str] , _lowerCAmelCase : List[Any] ):
A = """ """.join(_lowerCAmelCase ).replace("""@@ """ , """""" ).strip()
return out_string
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" )
A = 0
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
A = token_index
writer.write(""" """.join(_lowerCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 337 | 0 |
def __a ( UpperCAmelCase , UpperCAmelCase = " " ) ->list:
"""simple docstring"""
A = []
A = 0
for index, char in enumerate(UpperCAmelCase ):
if char == separator:
split_words.append(string[last_index:index] )
A = index + 1
elif index + 1 == len(UpperCAmelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 352 |
'''simple docstring'''
_lowerCamelCase : List[Any] = 'Input must be a string of 8 numbers plus letter'
_lowerCamelCase : str = 'TRWAGMYFPDXBNJZSQVHLCKE'
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
A = f"""Expected string as input, found {type(UpperCAmelCase ).__name__}"""
raise TypeError(UpperCAmelCase )
A = spanish_id.replace("""-""" , """""" ).upper()
if len(UpperCAmelCase ) != 9:
raise ValueError(UpperCAmelCase )
try:
A = int(spanish_id_clean[0:8] )
A = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(UpperCAmelCase ) from ex
if letter.isdigit():
raise ValueError(UpperCAmelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class __UpperCAmelCase ( snake_case_ ):
'''simple docstring'''
__lowerCAmelCase = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__(self : str , **_lowerCAmelCase : Any ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A = deprecated_arg[3:]
A = not kwargs.pop(_lowerCAmelCase )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
A = kwargs.pop("""tpu_name""" , self.tpu_name )
A = kwargs.pop("""device_idx""" , self.device_idx )
A = kwargs.pop("""eager_mode""" , self.eager_mode )
A = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**_lowerCAmelCase )
__lowerCAmelCase = field(
default=snake_case_ , metadata={'''help''': '''Name of TPU'''} , )
__lowerCAmelCase = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
__lowerCAmelCase = field(default=snake_case_ , metadata={'''help''': '''Benchmark models in eager model.'''} )
__lowerCAmelCase = field(
default=snake_case_ , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def A (self : Optional[Any] ):
requires_backends(self , ["""tf"""] )
A = None
if self.tpu:
try:
if self.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A = None
return tpu
@cached_property
def A (self : List[Any] ):
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
A = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
A = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def A (self : List[str] ):
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def A (self : str ):
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def A (self : List[str] ):
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def A (self : Optional[int] ):
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def A (self : Dict ):
return self.n_gpu > 0
| 353 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''umt5'''
__lowerCAmelCase = ['''past_key_values''']
def __init__(self : Dict , _lowerCAmelCase : Optional[int]=25_0112 , _lowerCAmelCase : int=512 , _lowerCAmelCase : Any=64 , _lowerCAmelCase : int=1024 , _lowerCAmelCase : int=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]=6 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : Any=128 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=1e-6 , _lowerCAmelCase : Dict=1.0 , _lowerCAmelCase : Tuple="gated-gelu" , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[int]="T5Tokenizer" , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : str=1 , _lowerCAmelCase : Union[str, Any]=0 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(
is_encoder_decoder=_lowerCAmelCase , tokenizer_class=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
A = vocab_size
A = d_model
A = d_kv
A = d_ff
A = num_layers
A = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A = num_heads
A = relative_attention_num_buckets
A = relative_attention_max_distance
A = dropout_rate
A = layer_norm_epsilon
A = initializer_factor
A = feed_forward_proj
A = use_cache
A = self.feed_forward_proj.split("""-""" )
A = act_info[-1]
A = act_info[0] == """gated"""
if len(_lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
A = """gelu_new"""
@property
def A (self : Optional[Any] ):
return self.d_model
@property
def A (self : List[Any] ):
return self.num_heads
@property
def A (self : Dict ):
return self.num_layers
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def A (self : Optional[Any] ):
A = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
A = """past_encoder_sequence + sequence"""
A = {0: """batch"""}
A = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A = {0: """batch""", 1: """decoder_sequence"""}
A = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def A (self : Union[str, Any] ):
return 13
@property
def A (self : Tuple ):
return 5e-4
| 337 | 0 |
'''simple docstring'''
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Tuple , _lowerCAmelCase : list ):
A = set_counts
A = max(_lowerCAmelCase )
A = len(_lowerCAmelCase )
A = [1] * num_sets
A = list(range(_lowerCAmelCase ) )
def A (self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
A = self.get_parent(_lowerCAmelCase )
A = self.get_parent(_lowerCAmelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
A = 0
A = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
A = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
A = 0
A = src_parent
A = self.set_counts[src_parent]
A = max(self.max_set , _lowerCAmelCase )
return True
def A (self : Dict , _lowerCAmelCase : int ):
if self.parents[disj_set] == disj_set:
return disj_set
A = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 354 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''yolos'''
def __init__(self : Tuple , _lowerCAmelCase : List[Any]=768 , _lowerCAmelCase : str=12 , _lowerCAmelCase : Tuple=12 , _lowerCAmelCase : Optional[int]=3072 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Optional[Any]=1e-12 , _lowerCAmelCase : Optional[Any]=[512, 864] , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[int]=100 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Any=0.1 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def A (self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A (self : Any ):
return 1e-4
@property
def A (self : int ):
return 12
| 337 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class __UpperCAmelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__lowerCAmelCase = "autoformer"
__lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__(self : str , _lowerCAmelCase : Optional[Any] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Any] = "student_t" , _lowerCAmelCase : Union[str, Any] = "nll" , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7] , _lowerCAmelCase : Optional[Any] = True , _lowerCAmelCase : Tuple = 0 , _lowerCAmelCase : Any = 0 , _lowerCAmelCase : Optional[Any] = 0 , _lowerCAmelCase : str = 0 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Any] = None , _lowerCAmelCase : Union[str, Any] = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : str = 2 , _lowerCAmelCase : List[Any] = 2 , _lowerCAmelCase : str = 2 , _lowerCAmelCase : Union[str, Any] = 32 , _lowerCAmelCase : Union[str, Any] = 32 , _lowerCAmelCase : Dict = "gelu" , _lowerCAmelCase : Dict = 0.1 , _lowerCAmelCase : Dict = 0.1 , _lowerCAmelCase : str = 0.1 , _lowerCAmelCase : str = 0.1 , _lowerCAmelCase : Optional[Any] = 0.1 , _lowerCAmelCase : Optional[Any] = 100 , _lowerCAmelCase : Optional[Any] = 0.02 , _lowerCAmelCase : Optional[Any] = True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Dict = 10 , _lowerCAmelCase : Any = 25 , _lowerCAmelCase : Optional[Any] = 3 , **_lowerCAmelCase : Optional[int] , ):
# time series specific configuration
A = prediction_length
A = context_length if context_length is not None else prediction_length
A = distribution_output
A = loss
A = input_size
A = num_time_features
A = lags_sequence
A = scaling
A = num_dynamic_real_features
A = num_static_real_features
A = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
A = cardinality
else:
A = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
A = embedding_dimension
else:
A = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A = num_parallel_samples
# Transformer architecture configuration
A = input_size * len(self.lags_sequence ) + self._number_of_features
A = d_model
A = encoder_attention_heads
A = decoder_attention_heads
A = encoder_ffn_dim
A = decoder_ffn_dim
A = encoder_layers
A = decoder_layers
A = dropout
A = attention_dropout
A = activation_dropout
A = encoder_layerdrop
A = decoder_layerdrop
A = activation_function
A = init_std
A = use_cache
# Autoformer
A = label_length
A = moving_average
A = autocorrelation_factor
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def A (self : Tuple ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 355 |
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase ) ->list[int]:
"""simple docstring"""
return [ord(UpperCAmelCase ) - 96 for elem in plain]
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def __a ( ) ->None:
"""simple docstring"""
A = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , UpperCAmelCase )
print("""Decoded:""" , decode(UpperCAmelCase ) )
if __name__ == "__main__":
main()
| 337 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Dict = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class __UpperCAmelCase ( snake_case_ ):
'''simple docstring'''
__lowerCAmelCase = '''gptsan-japanese'''
__lowerCAmelCase = [
'''past_key_values''',
]
__lowerCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__(self : List[str] , _lowerCAmelCase : Union[str, Any]=3_6000 , _lowerCAmelCase : List[str]=1280 , _lowerCAmelCase : int=1024 , _lowerCAmelCase : Optional[Any]=8192 , _lowerCAmelCase : Dict=4096 , _lowerCAmelCase : Any=128 , _lowerCAmelCase : Optional[int]=10 , _lowerCAmelCase : List[Any]=0 , _lowerCAmelCase : int=16 , _lowerCAmelCase : Optional[int]=16 , _lowerCAmelCase : List[Any]=128 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : Optional[Any]=1e-5 , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Any="float32" , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : int=0.002 , _lowerCAmelCase : Dict=False , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[Any]=3_5998 , _lowerCAmelCase : Optional[int]=3_5995 , _lowerCAmelCase : Optional[int]=3_5999 , **_lowerCAmelCase : Tuple , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = d_ff
A = d_ext
A = d_spout
A = num_switch_layers
A = num_ext_layers
A = num_switch_layers + num_ext_layers
A = num_heads
A = num_experts
A = expert_capacity
A = dropout_rate
A = layer_norm_epsilon
A = router_bias
A = router_jitter_noise
A = router_dtype
A = router_ignore_padding_tokens
A = output_hidden_states
A = output_attentions
A = initializer_factor
A = output_router_logits
A = use_cache
super().__init__(
separator_token_id=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
| 356 |
'''simple docstring'''
import os
def __a ( ) ->List[Any]:
"""simple docstring"""
A = os.path.join(os.path.dirname(UpperCAmelCase ) , """num.txt""" )
with open(UpperCAmelCase ) as file_hand:
return str(sum(int(UpperCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 337 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_lowerCamelCase : Dict = None
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCamelCase : int = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
_lowerCamelCase : int = {
"google/rembert": 256,
}
_lowerCamelCase : int = "▁"
class __UpperCAmelCase ( __lowerCAmelCase ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = RemBertTokenizer
def __init__(self : Union[str, Any] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : int=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Dict="[CLS]" , _lowerCAmelCase : Tuple="[SEP]" , _lowerCAmelCase : List[str]="<unk>" , _lowerCAmelCase : List[Any]="[SEP]" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : Tuple="[CLS]" , _lowerCAmelCase : Optional[int]="[MASK]" , **_lowerCAmelCase : List[str] , ):
A = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def A (self : Tuple , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A (self : Dict , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None , _lowerCAmelCase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
def A (self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A (self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(lowerCamelCase__ ) )
return
A = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
| 357 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __a ( UpperCAmelCase ) ->List[int]:
"""simple docstring"""
if isinstance(UpperCAmelCase , np.ndarray ):
return list(tensor.shape )
A = tf.shape(UpperCAmelCase )
if tensor.shape == tf.TensorShape(UpperCAmelCase ):
return dynamic
A = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase )]
def __a ( UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase , name=UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase=-1 ) ->str:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
A , A = tf.nn.moments(UpperCAmelCase , axes=[axis] , keepdims=UpperCAmelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
A = [1] * inputs.shape.rank
A = shape_list(UpperCAmelCase )[axis]
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
# Compute layer normalization using the batch_normalization
# function.
A = tf.nn.batch_normalization(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , offset=UpperCAmelCase , scale=UpperCAmelCase , variance_epsilon=UpperCAmelCase , )
return outputs
def __a ( UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=-1 ) ->int:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
A = tf.shape(UpperCAmelCase )
A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
def __a ( UpperCAmelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(UpperCAmelCase , tf.Tensor ):
A = tf.convert_to_tensor(UpperCAmelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
A = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
A = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
UpperCAmelCase , tf.cast(UpperCAmelCase , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
A = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
A = [x for x in data if len(UpperCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
A = np.asarray(UpperCAmelCase )
A = 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCAmelCase ):
A = chunk_data
else:
A = data
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if name in group.attrs:
A = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs[name]]
else:
A = []
A = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
def _expand_single_ad_tensor(UpperCAmelCase ):
if isinstance(UpperCAmelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCAmelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase )
| 337 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase : Dict = 16
_lowerCamelCase : Any = 32
def __a ( UpperCAmelCase , UpperCAmelCase = 16 ) ->str:
"""simple docstring"""
A = AutoTokenizer.from_pretrained("""bert-base-cased""" )
A = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A = 16
elif accelerator.mixed_precision != "no":
A = 8
else:
A = None
return tokenizer.pad(
A__ , padding="""longest""" , max_length=A__ , pad_to_multiple_of=A__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
A = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
A = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase : Union[str, Any] = mocked_dataloaders # noqa: F811
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , A__ ) == "1":
A = 2
# New Code #
A = int(args.gradient_accumulation_steps )
A = int(args.local_sgd_steps )
# Initialize accelerator
A = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=A__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A = config["""lr"""]
A = int(config["""num_epochs"""] )
A = int(config["""seed"""] )
A = int(config["""batch_size"""] )
A = evaluate.load("""glue""" , """mrpc""" )
set_seed(A__ )
A , A = get_dataloaders(A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A = model.to(accelerator.device )
# Instantiate optimizer
A = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
A = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=100 , num_training_steps=(len(A__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
with LocalSGD(
accelerator=A__ , model=A__ , local_sgd_steps=A__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(A__ ):
A = model(**A__ )
A = output.loss
accelerator.backward(A__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A = model(**A__ )
A = outputs.logits.argmax(dim=-1 )
A , A = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=A__ , references=A__ , )
A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A__ )
def __a ( ) ->Tuple:
"""simple docstring"""
A = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=A__ , default=A__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=A__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=A__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
A = parser.parse_args()
A = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 358 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_lowerCamelCase : Any = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
A = primes[group]["""prime"""]
A = primes[group]["""generator"""]
A = int(hexlify(urandom(32 ) ) , base=16 )
def A (self : Optional[Any] ):
return hex(self.__private_key )[2:]
def A (self : Union[str, Any] ):
A = pow(self.generator , self.__private_key , self.prime )
return hex(_lowerCAmelCase )[2:]
def A (self : Any , _lowerCAmelCase : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(_lowerCAmelCase , (self.prime - 1) // 2 , self.prime ) == 1
)
def A (self : List[str] , _lowerCAmelCase : str ):
A = int(_lowerCAmelCase , base=16 )
if not self.is_valid_public_key(_lowerCAmelCase ):
raise ValueError("""Invalid public key""" )
A = pow(_lowerCAmelCase , self.__private_key , self.prime )
return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest()
@staticmethod
def A (_lowerCAmelCase : int , _lowerCAmelCase : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(_lowerCAmelCase , (prime - 1) // 2 , _lowerCAmelCase ) == 1
)
@staticmethod
def A (_lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : int = 14 ):
A = int(_lowerCAmelCase , base=16 )
A = int(_lowerCAmelCase , base=16 )
A = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("""Invalid public key""" )
A = pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
_lowerCamelCase : Optional[Any] = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
_lowerCamelCase : int = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
with open(_UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
A = json.loads(f.read() )
A = collections.OrderedDict()
A = collections.OrderedDict()
A = collections.OrderedDict()
with open(_UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
A = f.readlines()
A = [[t.rstrip("""\n""" )] if (t == ',' or ',' not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
for idx, b in enumerate(_UpperCAmelCase ):
A = b
A = idx
for wd in b:
A = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__(self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int="<|endoftext|>" , _lowerCAmelCase : List[str]="<|endoftext|>" , _lowerCAmelCase : Tuple="<|startoftext|>" , _lowerCAmelCase : int="<|endoftext|>" , _lowerCAmelCase : Dict=False , **_lowerCAmelCase : Any , ):
super().__init__(
unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , do_clean_text=_lowerCAmelCase , **_lowerCAmelCase , )
if not os.path.isfile(_lowerCAmelCase ):
raise ValueError(
F"""Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained"""
""" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
if not os.path.isfile(_lowerCAmelCase ):
raise ValueError(
F"""Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google"""
""" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
A = do_clean_text
A = load_vocab_and_emoji(_lowerCAmelCase , _lowerCAmelCase )
A = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def A (self : str ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def A (self : Any ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def A (self : Optional[Any] , _lowerCAmelCase : int ):
return self.subword_tokenizer.tokenize(_lowerCAmelCase , clean=self.do_clean_text )
def A (self : List[str] , _lowerCAmelCase : Any ):
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def A (self : Any , _lowerCAmelCase : Any ):
return self.subword_tokenizer.convert_id_to_token(_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : Union[str, Any] ):
A = ''.join(_lowerCAmelCase ).strip()
return out_string
def A (self : Union[str, Any] , _lowerCAmelCase : "Conversation" ):
A = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) + [self.eos_token_id] )
if len(_lowerCAmelCase ) > self.model_max_length:
A = input_ids[-self.model_max_length :]
return input_ids
def A (self : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
A = 0
if os.path.isdir(_lowerCAmelCase ):
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
A = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
A = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
A = token_index
writer.write(""",""".join(_lowerCAmelCase ) + """\n""" )
index += 1
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , _lowerCAmelCase )
return vocab_file, emoji_file
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
A = vocab # same as swe
A = ids_to_tokens # same as bpe
A = emoji
A = np.max([len(_lowerCAmelCase ) for w in self.vocab.keys()] )
A = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
A = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
A = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
A = re.compile(
r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
A = re.compile(
r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
A = re.compile(
r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
A = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
A = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
A = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__(self : List[Any] ):
return len(self.ids_to_tokens )
def A (self : int , _lowerCAmelCase : Optional[int] ):
A = self.content_repattera.sub("""<URL>""" , _lowerCAmelCase )
A = self.content_repattera.sub("""<EMAIL>""" , _lowerCAmelCase )
A = self.content_repattera.sub("""<TEL>""" , _lowerCAmelCase )
A = self.content_repattera.sub("""<DATE>""" , _lowerCAmelCase )
A = self.content_repattera.sub("""<DATE>""" , _lowerCAmelCase )
A = self.content_repattera.sub("""<PRICE>""" , _lowerCAmelCase )
A = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
A = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def A (self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Any=False ):
A = text.replace(""" """ , """<SP>""" )
A = text.replace(""" """ , """<SP>""" )
A = text.replace("""\r\n""" , """<BR>""" )
A = text.replace("""\n""" , """<BR>""" )
A = text.replace("""\r""" , """<BR>""" )
A = text.replace("""\t""" , """<TAB>""" )
A = text.replace("""—""" , """ー""" )
A = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
A = text.replace(_lowerCAmelCase , _lowerCAmelCase )
if clean:
A = self.clean_text(_lowerCAmelCase )
def check_simbol(_lowerCAmelCase : Optional[Any] ):
A = x.encode()
if len(_lowerCAmelCase ) == 1 and len(_lowerCAmelCase ) == 2:
A = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc_2a1 and c <= 0Xc_2bf)
or (c >= 0Xc_780 and c <= 0Xc_783)
or (c >= 0Xc_ab9 and c <= 0Xc_bbf)
or (c >= 0Xc_c80 and c <= 0Xc_da2)
):
return True
return False
def checkuae(_lowerCAmelCase : Union[str, Any] ):
A = x.encode()
if len(_lowerCAmelCase ) == 1 and len(_lowerCAmelCase ) == 3:
A = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe28_080 and c <= 0Xe2b_07f:
return True
return False
A = 0
A = []
while pos < len(_lowerCAmelCase ):
A = min(len(_lowerCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
A = [] # (token_id, token, pos)
for e in range(_lowerCAmelCase , _lowerCAmelCase , -1 ):
A = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_lowerCAmelCase ) > 2:
A = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_lowerCAmelCase ) > 0:
# the smallest token_id is adopted
A = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[0] )[0]
result.append(_lowerCAmelCase )
A = e
else:
A = pos + 1
A = text[pos:end]
if check_simbol(_lowerCAmelCase ):
result.append("""<KIGOU>""" )
elif checkuae(_lowerCAmelCase ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
A = end
return result
def A (self : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any]="\n" ):
A = []
A = []
A = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_lowerCAmelCase ) > 0:
words.append(bytearray(_lowerCAmelCase ).decode("""utf-8""" , errors="""replace""" ) )
A = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(_lowerCAmelCase )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
words.append(bytearray(_lowerCAmelCase ).decode("""utf-8""" , errors="""replace""" ) )
A = ''.join(_lowerCAmelCase )
return text
| 359 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
else:
return a * actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(UpperCAmelCase , UpperCAmelCase )
return actual_power(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
| 337 | 0 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) ->str:
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
# old versions of hfh don't url-encode the file path
A = quote(a__ )
return hfh.hf_hub_url(a__ , a__ , repo_type="""dataset""" , revision=a__ )
| 360 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
if isinstance(UpperCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __UpperCAmelCase :
'''simple docstring'''
def A (self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
pass
def A (self : List[str] ):
pass
def A (self : Union[str, Any] ):
pass
def A (self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ):
A = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = {"""vision_model""": vision_model, """text_model""": text_model}
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = after_output[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def A (self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ):
A = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def A (self : List[str] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def A (self : Optional[int] ):
A = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def A (self : List[Any] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def A (self : Tuple ):
A , A = self.get_pretrained_model_and_inputs()
A = model_a(**_lowerCAmelCase )
A = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model_a(**_lowerCAmelCase )
A = after_outputs[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : int ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
A = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Union[str, Any] ):
A = TFViTModelTester(self )
A = TFBertModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : str ):
A = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : str ):
A = TFDeiTModelTester(self )
A = TFRobertaModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Dict ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Optional[Any] ):
A = TFCLIPVisionModelTester(self )
A = TFBertModelTester(self )
A = clip_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : Any ):
A = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
A = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
A = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 337 | 0 |
'''simple docstring'''
_lowerCamelCase : List[str] = 'Tobias Carryer'
from time import time
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : str=int(time() ) ) -> str: # noqa: B008
A = multiplier
A = increment
A = modulo
A = seed
def A (self : Tuple ) -> Union[str, Any]:
A = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
_lowerCamelCase : Optional[int] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 361 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Any = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : List[str] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_lowerCamelCase : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_lowerCamelCase : Dict = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCamelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCamelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCamelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCamelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_MAPPING
_lowerCamelCase : Optional[Any] = auto_class_update(FlaxAutoModel)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCamelCase : str = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowerCamelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 337 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = "▁"
_lowerCamelCase : int = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCamelCase : int = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCamelCase : str = {"vinai/bartpho-syllable": 1024}
class __UpperCAmelCase ( UpperCamelCase_ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__(self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict="<s>" , _lowerCAmelCase : Tuple="</s>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Union[str, Any]="<unk>" , _lowerCAmelCase : Tuple="<pad>" , _lowerCAmelCase : str="<mask>" , _lowerCAmelCase : Optional[Any] = None , **_lowerCAmelCase : int , ):
A = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
A = vocab_file
A = monolingual_vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
A = {}
A = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_a ) not in self.fairseq_tokens_to_ids:
A = cnt
cnt += 1
with open(_a , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
A = line.strip().split()[0]
A = len(self.fairseq_tokens_to_ids )
if str(_a ) not in self.fairseq_tokens_to_ids:
A = len(self.fairseq_tokens_to_ids )
A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[int] ):
A = self.__dict__.copy()
A = None
A = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : int , _lowerCAmelCase : int ):
A = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A (self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : str = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A (self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] = None , _lowerCAmelCase : Tuple = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def A (self : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A (self : List[str] ):
return len(self.fairseq_ids_to_tokens )
def A (self : Dict ):
A = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A (self : Dict , _lowerCAmelCase : Optional[int] ):
return self.sp_model.encode(_a , out_type=_a )
def A (self : Dict , _lowerCAmelCase : List[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def A (self : Any , _lowerCAmelCase : str ):
return self.fairseq_ids_to_tokens[index]
def A (self : Tuple , _lowerCAmelCase : str ):
A = """""".join(_a ).replace(_a , """ """ ).strip()
return out_string
def A (self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] = None ):
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(_a )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_a ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _a )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_a , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(_a )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 362 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def A (self : Any ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def A (self : List[str] ):
pass
@slow
@require_torch
def A (self : int ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
A = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
A = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def A (self : Tuple ):
pass
| 337 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
A = original_name.split(""".""" )[0]
A = key.split(""".""" )
A = int(key_list[key_list.index(lowercase__ ) - 2] )
A = int(key_list[key_list.index(lowercase__ ) - 1] )
A = orig_block_num - offset
A = key.replace(F"""{orig_block_num}.{layer_num}.{original_name}""" , F"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = OrderedDict()
A , A = 0, 0
for key, value in state_dict.items():
if key.startswith("""network""" ):
A = key.replace("""network""" , """poolformer.encoder""" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""" ) and "patch_embed" not in key:
patch_emb_offset += 1
A = key[: key.find("""proj""" )]
A = key.replace(lowercase__ , F"""patch_embeddings.{total_embed_found}.""" )
A = key.replace("""proj""" , """projection""" )
if key.endswith("""bias""" ):
total_embed_found += 1
if "patch_embeddings" in key:
A = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
A = replace_key_with_offset(lowercase__ , lowercase__ , """mlp.fc1""" , """output.conv1""" )
if "mlp.fc2" in key:
A = replace_key_with_offset(lowercase__ , lowercase__ , """mlp.fc2""" , """output.conv2""" )
if "norm1" in key:
A = replace_key_with_offset(lowercase__ , lowercase__ , """norm1""" , """before_norm""" )
if "norm2" in key:
A = replace_key_with_offset(lowercase__ , lowercase__ , """norm2""" , """after_norm""" )
if "layer_scale_1" in key:
A = replace_key_with_offset(lowercase__ , lowercase__ , """layer_scale_1""" , """layer_scale_1""" )
if "layer_scale_2" in key:
A = replace_key_with_offset(lowercase__ , lowercase__ , """layer_scale_2""" , """layer_scale_2""" )
if "head" in key:
A = key.replace("""head""" , """classifier""" )
A = value
return new_state_dict
def __a ( ) ->Any:
"""simple docstring"""
A = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return image
@torch.no_grad()
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
A = PoolFormerConfig()
# set attributes based on model_name
A = """huggingface/label-files"""
A = model_name[-3:]
A = 1000
A = """imagenet-1k-id2label.json"""
A = (1, 1000)
# set config attributes
A = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
A = {int(lowercase__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
if size == "s12":
A = [2, 2, 6, 2]
A = [64, 128, 320, 512]
A = 4.0
A = 0.9
elif size == "s24":
A = [4, 4, 12, 4]
A = [64, 128, 320, 512]
A = 4.0
A = 0.9
elif size == "s36":
A = [6, 6, 18, 6]
A = [64, 128, 320, 512]
A = 4.0
A = 1E-6
A = 0.9
elif size == "m36":
A = [6, 6, 18, 6]
A = [96, 192, 384, 768]
A = 4.0
A = 1E-6
A = 0.95
elif size == "m48":
A = [8, 8, 24, 8]
A = [96, 192, 384, 768]
A = 4.0
A = 1E-6
A = 0.95
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor
A = PoolFormerImageProcessor(crop_pct=lowercase__ )
# Prepare image
A = prepare_img()
A = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
A = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) )
# rename keys
A = rename_keys(lowercase__ )
# create HuggingFace model and load state dict
A = PoolFormerForImageClassification(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
# Define image processor
A = PoolFormerImageProcessor(crop_pct=lowercase__ )
A = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values
# forward pass
A = model(lowercase__ )
A = outputs.logits
# define expected logit slices for different models
if size == "s12":
A = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
A = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
A = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
A = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
A = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F"""Size {size} not supported""" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , lowercase__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 363 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCamelCase : Dict = 'src/diffusers'
_lowerCamelCase : Dict = '.'
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCamelCase : List[str] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCamelCase : Tuple = spec.loader.load_module()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCAmelCase ) is not None
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = object_name.split(""".""" )
A = 0
# First let's find the module where our object lives.
A = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , f"""{module}.py""" ) ):
i += 1
if i < len(UpperCAmelCase ):
A = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(UpperCAmelCase , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
# Now let's find the class / func in the code!
A = """"""
A = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
return "".join(UpperCAmelCase )
_lowerCamelCase : str = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_lowerCamelCase : Any = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_lowerCamelCase : str = re.compile(R'<FILL\s+[^>]*>')
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = code.split("""\n""" )
A = 0
while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = len(get_indent(UpperCAmelCase ) ) > 0
if has_indent:
A = f"""class Bla:\n{code}"""
A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase )
A = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
A , A = style_docstrings_in_code(UpperCAmelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
with open(UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
A = []
A = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase ):
A = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A = search.groups()
A = find_code_in_diffusers(UpperCAmelCase )
A = get_indent(UpperCAmelCase )
A = line_index + 1 if indent == theoretical_indent else line_index + 2
A = theoretical_indent
A = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A = True
while line_index < len(UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase ):
break
A = lines[line_index]
A = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(f"""^{indent}# End copy""" , UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
A = """""".join(UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
A = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase ) is None]
A = """\n""".join(UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase ) > 0:
A = replace_pattern.replace("""with""" , """""" ).split(""",""" )
A = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A = pattern.groups()
A = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if option.strip() == "all-casing":
A = re.sub(obja.lower() , obja.lower() , UpperCAmelCase )
A = re.sub(obja.upper() , obja.upper() , UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A = blackify(lines[start_index - 1] + theoretical_code )
A = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A = lines[:start_index] + [theoretical_code] + lines[line_index:]
A = start_index + 1
if overwrite and len(UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase )
return diffs
def __a ( UpperCAmelCase = False ) ->int:
"""simple docstring"""
A = glob.glob(os.path.join(UpperCAmelCase , """**/*.py""" ) , recursive=UpperCAmelCase )
A = []
for filename in all_files:
A = is_copy_consistent(UpperCAmelCase , UpperCAmelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(UpperCAmelCase ) > 0:
A = """\n""".join(UpperCAmelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCamelCase : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 337 | 0 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
if not numbers:
return 0
if not isinstance(__snake_case , (list, tuple) ) or not all(
isinstance(__snake_case , __snake_case ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
A = numbers[0]
for i in range(1 , len(__snake_case ) ):
# update the maximum and minimum subarray products
A = numbers[i]
if number < 0:
A = min_till_now, max_till_now
A = max(__snake_case , max_till_now * number )
A = min(__snake_case , min_till_now * number )
# update the maximum product found till now
A = max(__snake_case , __snake_case )
return max_prod
| 364 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = credit_card_number
A = 0
A = len(UpperCAmelCase ) - 2
for i in range(UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
A = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
A = cc_number[:i] + str(UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = f"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(f"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(UpperCAmelCase ) <= 16:
print(f"""{error_message} of its length.""" )
return False
if not validate_initial_digits(UpperCAmelCase ):
print(f"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(UpperCAmelCase ):
print(f"""{error_message} it fails the Luhn check.""" )
return False
print(f"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 337 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = inspect.getfile(accelerate.test_utils )
__lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__lowerCAmelCase = ["accelerate", "launch"]
__lowerCAmelCase = Path.home() / ".cache/huggingface/accelerate"
__lowerCAmelCase = "default_config.yaml"
__lowerCAmelCase = config_folder / config_file
__lowerCAmelCase = config_folder / "_default_config.yaml"
__lowerCAmelCase = Path('''tests/test_configs''' )
@classmethod
def A (cls : int ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def A (cls : Any ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def A (self : Tuple ):
A = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def A (self : List[Any] ):
for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ):
with self.subTest(config_file=lowerCamelCase_ ):
execute_subprocess_async(
self.base_cmd + ["""--config_file""", str(lowerCamelCase_ ), self.test_file_path] , env=os.environ.copy() )
def A (self : Union[str, Any] ):
execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy() )
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = "test-tpu"
__lowerCAmelCase = "us-central1-a"
__lowerCAmelCase = "ls"
__lowerCAmelCase = ["accelerate", "tpu-config"]
__lowerCAmelCase = "cd /usr/share"
__lowerCAmelCase = "tests/test_samples/test_command_file.sh"
__lowerCAmelCase = "Running gcloud compute tpus tpu-vm ssh"
def A (self : Dict ):
A = run_command(
self.cmd
+ ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=lowerCamelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , lowerCamelCase_ , )
def A (self : Optional[int] ):
A = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command""",
self.command,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=lowerCamelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , lowerCamelCase_ , )
def A (self : Dict ):
A = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=lowerCamelCase_ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , lowerCamelCase_ , )
def A (self : Optional[int] ):
A = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=lowerCamelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , lowerCamelCase_ , )
def A (self : Union[str, Any] ):
A = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--command""",
self.command,
"""--command""",
"""echo \"Hello World\"""",
"""--debug""",
] , return_stdout=lowerCamelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , lowerCamelCase_ , )
def A (self : str ):
A = run_command(
self.cmd
+ ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=lowerCamelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , lowerCamelCase_ , )
def A (self : Dict ):
A = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command_file""",
self.command_file,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=lowerCamelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , lowerCamelCase_ , )
def A (self : Optional[int] ):
A = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=lowerCamelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , lowerCamelCase_ , )
def A (self : List[str] ):
A = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--install_accelerate""",
"""--accelerate_version""",
"""12.0.0""",
"""--debug""",
] , return_stdout=lowerCamelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , lowerCamelCase_ , )
| 365 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : List[Any] ):
A = str(id_ )
A = None
A = None
A = []
A = {} # {vertex:distance}
def __lt__(self : List[Any] , _lowerCAmelCase : Tuple ):
return self.key < other.key
def __repr__(self : str ):
return self.id
def A (self : Union[str, Any] , _lowerCAmelCase : List[str] ):
self.neighbors.append(_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
A = weight
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , UpperCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->list:
"""simple docstring"""
A = []
for u in graph:
A = math.inf
A = None
A = 0
A = graph[:]
while q:
A = min(UpperCAmelCase )
q.remove(UpperCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
for i in range(1 , len(UpperCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Iterator[tuple]:
"""simple docstring"""
for u in graph:
A = math.inf
A = None
A = 0
A = list(UpperCAmelCase )
hq.heapify(UpperCAmelCase )
while h:
A = hq.heappop(UpperCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
hq.heapify(UpperCAmelCase )
for i in range(1 , len(UpperCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __a ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , """config.json""" ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE_ , """config.json""" ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE_ , """config.json""" ) )
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , """pytorch_model.bin""" ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE_ , """pytorch_model.bin""" ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE_ , """pytorch_model.bin""" ) )
else:
os.makedirs(SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
A = 2
if unlogit:
A = torch.pow(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A = p * torch.log(SCREAMING_SNAKE_CASE_ )
A = 0
return -plogp.sum(dim=-1 )
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
logger.info("""lv, h >\t""" + """\t""".join(f"""{x + 1}""" for x in range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
for row in range(len(SCREAMING_SNAKE_CASE_ ) ):
if tensor.dtype != torch.long:
logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:d}""" for x in tensor[row].cpu().data ) )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False ) ->int:
"""simple docstring"""
A , A = model.config.num_hidden_layers, model.config.num_attention_heads
A = torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).to(args.device )
A = torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).to(args.device )
if head_mask is None:
A = torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).to(args.device )
head_mask.requires_grad_(requires_grad=SCREAMING_SNAKE_CASE_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
A = None
A = 0.0
A = 0.0
for step, inputs in enumerate(tqdm(SCREAMING_SNAKE_CASE_ , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
A = tuple(t.to(args.device ) for t in inputs )
((A ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
A = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
A , A , A = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(SCREAMING_SNAKE_CASE_ ):
A = entropy(attn.detach() , SCREAMING_SNAKE_CASE_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(SCREAMING_SNAKE_CASE_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
A = 2
A = torch.pow(torch.pow(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""" )
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
if compute_importance:
logger.info("""Head importance scores""" )
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
logger.info("""Head ranked by importance scores""" )
A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
A = torch.arange(
head_importance.numel() , device=args.device )
A = head_ranks.view_as(SCREAMING_SNAKE_CASE_ )
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
return attn_entropy, head_importance, total_loss
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
A , A , A = compute_heads_importance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compute_entropy=SCREAMING_SNAKE_CASE_ )
A = 1 / loss # instead of downsteam score use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , SCREAMING_SNAKE_CASE_ , original_score * args.masking_threshold )
A = torch.ones_like(SCREAMING_SNAKE_CASE_ )
A = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
A = original_score
while current_score >= original_score * args.masking_threshold:
A = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
A = float("""Inf""" )
A = head_importance.view(-1 ).sort()[1]
if len(SCREAMING_SNAKE_CASE_ ) <= num_to_mask:
print("""BREAK BY num_to_mask""" )
break
# mask heads
A = current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
A = new_head_mask.view(-1 )
A = 0.0
A = new_head_mask.view_as(SCREAMING_SNAKE_CASE_ )
A = new_head_mask.clone().detach()
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
# Compute metric and head importance again
A , A , A = compute_heads_importance(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compute_entropy=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ )
A = 1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , SCREAMING_SNAKE_CASE_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("""Final head mask""" )
print_ad_tensor(SCREAMING_SNAKE_CASE_ )
np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
return head_mask
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
A = datetime.now()
A , A , A = compute_heads_importance(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compute_entropy=SCREAMING_SNAKE_CASE_ , compute_importance=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ )
A = 1 / loss
A = datetime.now() - before_time
A = sum(p.numel() for p in model.parameters() )
A = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(SCREAMING_SNAKE_CASE_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A = [
v,
]
assert sum(len(SCREAMING_SNAKE_CASE_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(SCREAMING_SNAKE_CASE_ )
A = sum(p.numel() for p in model.parameters() )
A = datetime.now()
A , A , A = compute_heads_importance(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compute_entropy=SCREAMING_SNAKE_CASE_ , compute_importance=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , actually_pruned=SCREAMING_SNAKE_CASE_ , )
A = 1 / loss
A = datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , pruned_num_params / original_num_params * 100 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 )
save_model(SCREAMING_SNAKE_CASE_ , args.output_dir )
def __a ( ) ->Union[str, Any]:
"""simple docstring"""
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=SCREAMING_SNAKE_CASE_ , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=SCREAMING_SNAKE_CASE_ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=SCREAMING_SNAKE_CASE_ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" )
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=SCREAMING_SNAKE_CASE_ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=SCREAMING_SNAKE_CASE_ , help="""Amount to heads to masking at each masking step.""" )
parser.add_argument("""--metric_name""" , default="""acc""" , type=SCREAMING_SNAKE_CASE_ , help="""Metric to use for head masking.""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=SCREAMING_SNAKE_CASE_ , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=SCREAMING_SNAKE_CASE_ , help="""Batch size.""" )
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE_ , default=-1 , help="""local_rank for distributed training on gpus""" )
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
parser.add_argument("""--server_ip""" , type=SCREAMING_SNAKE_CASE_ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=SCREAMING_SNAKE_CASE_ , default="""""" , help="""Can be used for distant debugging.""" )
A = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
A = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
A = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
A = torch.device("""cuda""" , args.local_rank )
A = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
A = nn.parallel.DistributedDataParallel(
SCREAMING_SNAKE_CASE_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=SCREAMING_SNAKE_CASE_ )
elif args.n_gpu > 1:
A = nn.DataParallel(SCREAMING_SNAKE_CASE_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE_ )
torch.save(SCREAMING_SNAKE_CASE_ , os.path.join(args.output_dir , """run_args.bin""" ) )
logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE_ )
# Prepare dataset
A = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
A = (torch.from_numpy(SCREAMING_SNAKE_CASE_ ),)
A = TensorDataset(*SCREAMING_SNAKE_CASE_ )
A = RandomSampler(SCREAMING_SNAKE_CASE_ )
A = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
A = mask_heads(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
prune_heads(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 366 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''perceiver'''
def __init__(self : Dict , _lowerCAmelCase : List[str]=256 , _lowerCAmelCase : Any=1280 , _lowerCAmelCase : Dict=768 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Optional[int]=26 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]="kv" , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Any=1e-12 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : int=262 , _lowerCAmelCase : int=2048 , _lowerCAmelCase : int=56 , _lowerCAmelCase : List[Any]=[368, 496] , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : Any=1920 , _lowerCAmelCase : Optional[int]=16 , _lowerCAmelCase : List[Any]=[1, 16, 224, 224] , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
A = num_latents
A = d_latents
A = d_model
A = num_blocks
A = num_self_attends_per_block
A = num_self_attention_heads
A = num_cross_attention_heads
A = qk_channels
A = v_channels
A = cross_attention_shape_for_attention
A = self_attention_widening_factor
A = cross_attention_widening_factor
A = hidden_act
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = use_query_residual
# masked language modeling attributes
A = vocab_size
A = max_position_embeddings
# image classification attributes
A = image_size
# flow attributes
A = train_size
# multimodal autoencoding attributes
A = num_frames
A = audio_samples_per_frame
A = samples_per_patch
A = output_shape
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
def A (self : List[str] ):
if self.task == "multiple-choice":
A = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def A (self : Dict ):
return 1e-4
def A (self : List[Any] , _lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = preprocessor.num_special_tokens_to_add(_lowerCAmelCase )
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
A = [""" """.join(["""a"""] ) * seq_length] * batch_size
A = dict(preprocessor(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""input_ids""" )
return inputs
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
A = self._generate_dummy_images(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A = dict(preprocessor(images=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 337 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase : Tuple = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
A = state_dict.pop(SCREAMING_SNAKE_CASE__ )
A = val
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
A = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
A = value
else:
A = value
return new_state_dict
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
A = """"""
if is_panoptic:
A = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[:256, :]
A = in_proj_bias[:256]
A = in_proj_weight[256:512, :]
A = in_proj_bias[256:512]
A = in_proj_weight[-256:, :]
A = in_proj_bias[-256:]
def __a ( ) ->List[Any]:
"""simple docstring"""
A = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A = """resnet101"""
if "dc5" in model_name:
A = True
A = """panoptic""" in model_name
if is_panoptic:
A = 250
else:
A = 91
A = """huggingface/label-files"""
A = """coco-detection-id2label.json"""
A = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
A = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
# load image processor
A = """coco_panoptic""" if is_panoptic else """coco_detection"""
A = ConditionalDetrImageProcessor(format=SCREAMING_SNAKE_CASE__ )
# prepare image
A = prepare_img()
A = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
A = encoding["""pixel_values"""]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
A = torch.hub.load("""DeppMeng/ConditionalDETR""" , SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ ).eval()
A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A = """conditional_detr.""" + src
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A = rename_backbone_keys(SCREAMING_SNAKE_CASE__ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , is_panoptic=SCREAMING_SNAKE_CASE__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A = state_dict.pop(SCREAMING_SNAKE_CASE__ )
A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A = state_dict.pop(SCREAMING_SNAKE_CASE__ )
A = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A = state_dict.pop(SCREAMING_SNAKE_CASE__ )
A = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A = state_dict.pop(SCREAMING_SNAKE_CASE__ )
A = val
# finally, create HuggingFace model and load state dict
A = ConditionalDetrForSegmentation(SCREAMING_SNAKE_CASE__ ) if is_panoptic else ConditionalDetrForObjectDetection(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
model.push_to_hub(repo_id=SCREAMING_SNAKE_CASE__ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A = conditional_detr(SCREAMING_SNAKE_CASE__ )
A = model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_lowerCamelCase : Dict = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 367 |
'''simple docstring'''
import math
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
A = n
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # adjacency matrix for weight
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
A = w
def A (self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def A (self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 0 |
'''simple docstring'''
import math
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
A = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCAmelCase )
if number < 1:
A = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCAmelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
A = int(math.log(number // 3 , 2 ) ) + 2
A = [3, 5]
A = 2
A = 3
for block in range(1 , UpperCAmelCase ):
for _ in range(UpperCAmelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
_lowerCamelCase : Optional[int] = 0
try:
_lowerCamelCase : int = proth(number)
except ValueError:
print(f"ValueError: there is no {number}th Proth number")
continue
print(f"The {number}th Proth number: {value}")
| 368 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : List[str] = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : List[str] = {
'Salesforce/codegen-350M-mono': 2048,
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = CodeGenTokenizer
def __init__(self : int , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[Any]="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Any=False , **_lowerCAmelCase : Optional[int] , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
if kwargs.pop("""add_bos_token""" , _lowerCAmelCase ):
A = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
A = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
A = add_prefix_space
A = pre_tok_class(**_lowerCAmelCase )
A = add_prefix_space
def A (self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def A (self : Tuple , _lowerCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[List[str]] = None , **_lowerCAmelCase : Tuple , ):
A = super().decode(
token_ids=_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase , **_lowerCAmelCase , )
if truncate_before_pattern is not None and len(_lowerCAmelCase ) > 0:
A = self.truncate(_lowerCAmelCase , _lowerCAmelCase )
return decoded_text
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
def find_re(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
A = pattern.search(_lowerCAmelCase , _lowerCAmelCase )
return m.start() if m else -1
A = [re.compile(_lowerCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
A = list(re.finditer("""^print""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: prints[1].start()]
A = list(re.finditer("""^def""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: defs[1].start()]
A = 0
A = [
pos for pos in [find_re(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for terminal in terminals] if pos != -1
]
if len(_lowerCAmelCase ) > 0:
return completion[: min(_lowerCAmelCase )]
else:
return completion
| 337 | 0 |
import numpy
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Optional[int] , _lowerCAmelCase : numpy.ndarray , _lowerCAmelCase : numpy.ndarray ):
A = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
A = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
A = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
A = numpy.random.rand(3 , 1 )
# Real output values provided.
A = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
A = numpy.zeros(output_array.shape )
def A (self : Optional[Any] ):
A = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
A = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
A = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A (self : int ):
A = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
A = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
A = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A (self : Dict , _lowerCAmelCase : numpy.ndarray , _lowerCAmelCase : int , _lowerCAmelCase : bool ):
for iteration in range(1 , iterations + 1 ):
A = self.feedforward()
self.back_propagation()
if give_loss:
A = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def A (self : Optional[Any] , _lowerCAmelCase : numpy.ndarray ):
A = input_arr
A = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
A = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
A = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def __a ( UpperCAmelCase ) ->numpy.ndarray:
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def __a ( UpperCAmelCase ) ->numpy.ndarray:
"""simple docstring"""
return (value) * (1 - (value))
def __a ( ) ->int:
"""simple docstring"""
A = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
A = TwoHiddenLayerNeuralNetwork(
input_array=a__ , output_array=a__ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a__ , iterations=10 , give_loss=a__ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Optional[Any] = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 337 | 0 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) ->Dict:
"""simple docstring"""
A = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
A = True, True
A = dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return path
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
A = 0
A = -1
for i in range(_lowerCamelCase ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
A = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
A = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
A = check_circuit_or_path(_lowerCamelCase , _lowerCamelCase )
if check == 3:
print("""graph is not Eulerian""" )
print("""no path""" )
return
A = 1
if check == 2:
A = odd_node
print("""graph has a Euler path""" )
if check == 1:
print("""graph has a Euler cycle""" )
A = dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
def __a ( ) ->List[Any]:
"""simple docstring"""
A = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
A = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
A = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
A = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
A = {
1: [],
2: []
# all degree is zero
}
A = 10
check_euler(_lowerCamelCase , _lowerCamelCase )
check_euler(_lowerCamelCase , _lowerCamelCase )
check_euler(_lowerCamelCase , _lowerCamelCase )
check_euler(_lowerCamelCase , _lowerCamelCase )
check_euler(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
| 370 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 337 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = None
def __a ( UpperCAmelCase , UpperCAmelCase=0.999 , UpperCAmelCase="cosine" , ) ->str:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCAmelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCAmelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
A = []
for i in range(UpperCAmelCase ):
A = i / num_diffusion_timesteps
A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCAmelCase ) / alpha_bar_fn(UpperCAmelCase ) , UpperCAmelCase ) )
return torch.tensor(UpperCAmelCase , dtype=torch.floataa )
class __UpperCAmelCase ( A__ , A__ ):
'''simple docstring'''
@register_to_config
def __init__(self : Tuple , _lowerCAmelCase : Union[str, Any] = 1000 , _lowerCAmelCase : str = "fixed_small_log" , _lowerCAmelCase : Tuple = True , _lowerCAmelCase : Optional[int] = 1.0 , _lowerCAmelCase : int = "epsilon" , _lowerCAmelCase : Dict = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'""" )
A = betas_for_alpha_bar(_SCREAMING_SNAKE_CASE )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas , dim=0 )
A = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A = 1.0
# setable values
A = None
A = torch.from_numpy(np.arange(0 , _SCREAMING_SNAKE_CASE )[::-1].copy() )
A = variance_type
def A (self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] = None ):
return sample
def A (self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] = None ):
A = num_inference_steps
A = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A = (np.arange(0 , _SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
def A (self : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Optional[int]=None ):
if prev_timestep is None:
A = t - 1
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A = self.betas[t]
else:
A = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A = torch.log(torch.clamp(_SCREAMING_SNAKE_CASE , min=1e-20 ) )
A = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A = variance.log()
A = beta.log()
A = (predicted_variance + 1) / 2
A = frac * max_log + (1 - frac) * min_log
return variance
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any = None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any] = True , ):
A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A = torch.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 )
else:
A = None
# 1. compute alphas, betas
if prev_timestep is None:
A = t - 1
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A = self.betas[t]
A = self.alphas[t]
else:
A = 1 - alpha_prod_t / alpha_prod_t_prev
A = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
""" for the UnCLIPScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A = torch.clamp(
_SCREAMING_SNAKE_CASE , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A = 0
if t > 0:
A = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=_SCREAMING_SNAKE_CASE , device=model_output.device )
A = self._get_variance(
_SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE , prev_timestep=_SCREAMING_SNAKE_CASE , )
if self.variance_type == "fixed_small_log":
A = variance
elif self.variance_type == "learned_range":
A = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
""" for the UnCLIPScheduler.""" )
A = variance * variance_noise
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE )
def A (self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , ):
A = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A = timesteps.to(original_samples.device )
A = alphas_cumprod[timesteps] ** 0.5
A = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A = sqrt_alpha_prod.unsqueeze(-1 )
A = (1 - alphas_cumprod[timesteps]) ** 0.5
A = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 371 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Tuple , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Dict ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Any ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Dict ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 337 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( a_ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = CTRLTokenizer
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : Dict ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
A = {"""unk_token""": """<unk>"""}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def A (self : List[Any] , **_lowerCAmelCase : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def A (self : Optional[Any] , _lowerCAmelCase : Dict ):
A = """adapt react readapt apt"""
A = """adapt react readapt apt"""
return input_text, output_text
def A (self : Optional[int] ):
A = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A = """adapt react readapt apt"""
A = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
A = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
A = tokens + [tokenizer.unk_token]
A = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
| 350 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __a ( ) ->str:
"""simple docstring"""
A = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=UpperCAmelCase , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=UpperCAmelCase , default=5 )
parser.add_argument("""--batch_size""" , type=UpperCAmelCase , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=UpperCAmelCase , default=1 )
parser.add_argument("""--freeze""" , type=UpperCAmelCase , default=UpperCAmelCase )
parser.add_argument("""--learning_rate""" , type=UpperCAmelCase , default=5E-4 )
parser.add_argument("""--seed""" , type=UpperCAmelCase , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=UpperCAmelCase , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=UpperCAmelCase , default=10 )
parser.add_argument("""--weight_decay""" , type=UpperCAmelCase , default=0.01 )
parser.add_argument("""--output_dir""" , type=UpperCAmelCase , default="""./results""" )
return parser.parse_args()
_lowerCamelCase : Optional[Any] = load('accuracy')
def __a ( UpperCAmelCase ) ->Any:
"""simple docstring"""
A , A = eval_pred
A = np.argmax(UpperCAmelCase , axis=1 )
return metric.compute(predictions=UpperCAmelCase , references=UpperCAmelCase )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Any ):
super().__init__()
A = trainer
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , **_lowerCAmelCase : List[Any] ):
if control.should_evaluate:
A = deepcopy(_lowerCAmelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def __a ( ) ->Optional[int]:
"""simple docstring"""
A = get_args()
set_seed(args.seed )
A = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
A = dataset.train_test_split(test_size=0.2 )
A = train_test["""test"""].train_test_split(test_size=0.5 )
A = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
A = AutoTokenizer.from_pretrained(args.model_ckpt )
A = tokenizer.eos_token
A = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
A = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
A = False
A = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(UpperCAmelCase ):
A = tokenizer(example["""src"""] , truncation=UpperCAmelCase , max_length=1024 )
A = labels.straint(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
A = train_test_validation.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=train_test_validation["""train"""].column_names , )
A = DataCollatorWithPadding(tokenizer=UpperCAmelCase )
A = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
A = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=UpperCAmelCase , data_collator=UpperCAmelCase , compute_metrics=UpperCAmelCase , )
print("""Training...""" )
trainer.add_callback(CustomCallback(UpperCAmelCase ) )
trainer.train()
if __name__ == "__main__":
main()
| 337 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = IFPipeline
__lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
__lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"latents"}
def A (self : List[str] ):
return self._get_dummy_components()
def A (self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict=0 ):
if str(_snake_case ).startswith("""mps""" ):
A = torch.manual_seed(_snake_case )
else:
A = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def A (self : List[Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def A (self : Any ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def A (self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def A (self : Optional[int] ):
self._test_save_load_local()
def A (self : List[str] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A (self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A (self : Tuple ):
# if
A = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
A = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_snake_case , tokenizer=_snake_case )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
A = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
A = None
A = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_snake_case , _snake_case , _snake_case , _snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
A = IFImgaImgPipeline(**pipe_a.components )
A = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_snake_case , _snake_case , _snake_case , _snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
A = IFInpaintingPipeline(**pipe_a.components )
A = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_snake_case , _snake_case , _snake_case , _snake_case )
def A (self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ):
# pipeline 1
_start_torch_memory_measurement()
A = torch.Generator(device="""cpu""" ).manual_seed(0 )
A = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type="""np""" , )
A = output.images[0]
assert image.shape == (64, 64, 3)
A = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(_snake_case , _snake_case )
# pipeline 2
_start_torch_memory_measurement()
A = torch.Generator(device="""cpu""" ).manual_seed(0 )
A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_snake_case )
A = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type="""np""" , )
A = output.images[0]
assert image.shape == (256, 256, 3)
A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_snake_case , _snake_case )
def A (self : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
# pipeline 1
_start_torch_memory_measurement()
A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_snake_case )
A = torch.Generator(device="""cpu""" ).manual_seed(0 )
A = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type="""np""" , )
A = output.images[0]
assert image.shape == (64, 64, 3)
A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(_snake_case , _snake_case )
# pipeline 2
_start_torch_memory_measurement()
A = torch.Generator(device="""cpu""" ).manual_seed(0 )
A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_snake_case )
A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_snake_case )
A = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , original_image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type="""np""" , )
A = output.images[0]
assert image.shape == (256, 256, 3)
A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_snake_case , _snake_case )
def A (self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : int ):
# pipeline 1
_start_torch_memory_measurement()
A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_snake_case )
A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_snake_case )
A = torch.Generator(device="""cpu""" ).manual_seed(0 )
A = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , mask_image=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type="""np""" , )
A = output.images[0]
assert image.shape == (64, 64, 3)
A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(_snake_case , _snake_case )
# pipeline 2
_start_torch_memory_measurement()
A = torch.Generator(device="""cpu""" ).manual_seed(0 )
A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_snake_case )
A = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_snake_case )
A = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_snake_case )
A = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , mask_image=_snake_case , original_image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type="""np""" , )
A = output.images[0]
assert image.shape == (256, 256, 3)
A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_snake_case , _snake_case )
def __a ( ) ->Optional[int]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 351 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
_lowerCamelCase : Dict = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
_lowerCamelCase : Optional[Any] = {
'ctrl': 256,
}
_lowerCamelCase : List[str] = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
A = set(UpperCAmelCase )
return pairs
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = CONTROL_CODES
def __init__(self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any]="<unk>" , **_lowerCAmelCase : Dict ):
super().__init__(unk_token=_lowerCAmelCase , **_lowerCAmelCase )
with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
A = json.load(_lowerCAmelCase )
A = {v: k for k, v in self.encoder.items()}
with open(_lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
A = merges_handle.read().split("""\n""" )[1:-1]
A = [tuple(merge.split() ) for merge in merges]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = {}
@property
def A (self : Tuple ):
return len(self.encoder )
def A (self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def A (self : Optional[int] , _lowerCAmelCase : Optional[int] ):
if token in self.cache:
return self.cache[token]
A = tuple(_lowerCAmelCase )
A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
A = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(_lowerCAmelCase ):
try:
A = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(_lowerCAmelCase )
A = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
A = get_pairs(_lowerCAmelCase )
A = """@@ """.join(_lowerCAmelCase )
A = word[:-4]
A = word
return word
def A (self : List[str] , _lowerCAmelCase : Dict ):
A = []
A = re.findall(r"""\S+\n?""" , _lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def A (self : str , _lowerCAmelCase : int ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def A (self : Dict , _lowerCAmelCase : str ):
return self.decoder.get(_lowerCAmelCase , self.unk_token )
def A (self : List[str] , _lowerCAmelCase : List[Any] ):
A = """ """.join(_lowerCAmelCase ).replace("""@@ """ , """""" ).strip()
return out_string
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" )
A = 0
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
A = token_index
writer.write(""" """.join(_lowerCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 337 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 352 |
'''simple docstring'''
_lowerCamelCase : List[Any] = 'Input must be a string of 8 numbers plus letter'
_lowerCamelCase : str = 'TRWAGMYFPDXBNJZSQVHLCKE'
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
A = f"""Expected string as input, found {type(UpperCAmelCase ).__name__}"""
raise TypeError(UpperCAmelCase )
A = spanish_id.replace("""-""" , """""" ).upper()
if len(UpperCAmelCase ) != 9:
raise ValueError(UpperCAmelCase )
try:
A = int(spanish_id_clean[0:8] )
A = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(UpperCAmelCase ) from ex
if letter.isdigit():
raise ValueError(UpperCAmelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
_lowerCamelCase : Dict = logging.getLogger(__name__)
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''token-classification'''
def __init__(self : List[str] , _lowerCAmelCase : Dict ):
if type(_lowerCAmelCase ) == dict:
A = Namespace(**_lowerCAmelCase )
A = import_module("""tasks""" )
try:
A = getattr(_lowerCAmelCase , hparams.task_type )
A = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
A = self.token_classification_task.get_labels(hparams.labels )
A = CrossEntropyLoss().ignore_index
super().__init__(_lowerCAmelCase , len(self.labels ) , self.mode )
def A (self : Optional[int] , **_lowerCAmelCase : List[Any] ):
return self.model(**_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
A = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
A = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
A = self(**_lowerCAmelCase )
A = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def A (self : Optional[Any] ):
A = self.hparams
for mode in ["train", "dev", "test"]:
A = self._feature_file(_lowerCAmelCase )
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , _lowerCAmelCase )
A = torch.load(_lowerCAmelCase )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
A = self.token_classification_task.read_examples_from_file(args.data_dir , _lowerCAmelCase )
A = self.token_classification_task.convert_examples_to_features(
_lowerCAmelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=_lowerCAmelCase , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , _lowerCAmelCase )
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A (self : int , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : bool = False ):
A = self._feature_file(_lowerCAmelCase )
logger.info("""Loading features from cached file %s""" , _lowerCAmelCase )
A = torch.load(_lowerCAmelCase )
A = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
A = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
A = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , batch_size=_lowerCAmelCase )
def A (self : Any , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ):
"""Compute validation""" ""
A = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
A = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
A = self(**_lowerCAmelCase )
A , A = outputs[:2]
A = logits.detach().cpu().numpy()
A = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A (self : Optional[Any] , _lowerCAmelCase : str ):
A = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
A = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
A = np.argmax(_lowerCAmelCase , axis=2 )
A = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
A = dict(enumerate(self.labels ) )
A = [[] for _ in range(out_label_ids.shape[0] )]
A = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
A = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(_lowerCAmelCase , _lowerCAmelCase ),
"""precision""": precision_score(_lowerCAmelCase , _lowerCAmelCase ),
"""recall""": recall_score(_lowerCAmelCase , _lowerCAmelCase ),
"""f1""": fa_score(_lowerCAmelCase , _lowerCAmelCase ),
}
A = dict(results.items() )
A = results
return ret, preds_list, out_label_list
def A (self : Union[str, Any] , _lowerCAmelCase : Dict ):
A , A , A = self._eval_end(_lowerCAmelCase )
A = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A (self : int , _lowerCAmelCase : List[str] ):
A , A , A = self._eval_end(_lowerCAmelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
A = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A (_lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
BaseTransformer.add_model_specific_args(_lowerCAmelCase , _lowerCAmelCase )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=_lowerCAmelCase , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=_lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=_lowerCAmelCase , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=_lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_lowerCamelCase : int = NERTransformer.add_model_specific_args(parser, os.getcwd())
_lowerCamelCase : Any = parser.parse_args()
_lowerCamelCase : List[Any] = NERTransformer(args)
_lowerCamelCase : Optional[int] = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_lowerCamelCase : List[str] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
_lowerCamelCase : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 353 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''umt5'''
__lowerCAmelCase = ['''past_key_values''']
def __init__(self : Dict , _lowerCAmelCase : Optional[int]=25_0112 , _lowerCAmelCase : int=512 , _lowerCAmelCase : Any=64 , _lowerCAmelCase : int=1024 , _lowerCAmelCase : int=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]=6 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : Any=128 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=1e-6 , _lowerCAmelCase : Dict=1.0 , _lowerCAmelCase : Tuple="gated-gelu" , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[int]="T5Tokenizer" , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : str=1 , _lowerCAmelCase : Union[str, Any]=0 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(
is_encoder_decoder=_lowerCAmelCase , tokenizer_class=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
A = vocab_size
A = d_model
A = d_kv
A = d_ff
A = num_layers
A = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A = num_heads
A = relative_attention_num_buckets
A = relative_attention_max_distance
A = dropout_rate
A = layer_norm_epsilon
A = initializer_factor
A = feed_forward_proj
A = use_cache
A = self.feed_forward_proj.split("""-""" )
A = act_info[-1]
A = act_info[0] == """gated"""
if len(_lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
A = """gelu_new"""
@property
def A (self : Optional[Any] ):
return self.d_model
@property
def A (self : List[Any] ):
return self.num_heads
@property
def A (self : Dict ):
return self.num_layers
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def A (self : Optional[Any] ):
A = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
A = """past_encoder_sequence + sequence"""
A = {0: """batch"""}
A = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A = {0: """batch""", 1: """decoder_sequence"""}
A = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def A (self : Union[str, Any] ):
return 13
@property
def A (self : Tuple ):
return 5e-4
| 337 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_lowerCamelCase : Dict = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase__ )
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
@dataclass(frozen=lowerCamelCase__ )
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __UpperCAmelCase ( lowerCamelCase__ ):
'''simple docstring'''
__lowerCAmelCase = 42
def __init__(self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Any=False , _lowerCAmelCase : bool = False , ):
A = hans_processors[task]()
A = os.path.join(
_lowerCAmelCase , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(_lowerCAmelCase ) , _lowerCAmelCase , ) , )
A = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A = label_list[2], label_list[1]
A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A = cached_features_file + ".lock"
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
A = torch.load(_lowerCAmelCase )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
A = (
processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase )
)
logger.info("""Training examples: %s""" , len(_lowerCAmelCase ) )
A = hans_convert_examples_to_features(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
logger.info("""Saving features into cached file %s""" , _lowerCAmelCase )
torch.save(self.features , _lowerCAmelCase )
def __len__(self : str ):
return len(self.features )
def __getitem__(self : List[str] , _lowerCAmelCase : Optional[Any] ):
return self.features[i]
def A (self : Optional[int] ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = 42
def __init__(self : int , _lowerCAmelCase : str , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] = 128 , _lowerCAmelCase : Dict=False , _lowerCAmelCase : bool = False , ):
A = hans_processors[task]()
A = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A = label_list[2], label_list[1]
A = label_list
A = processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase )
A = hans_convert_examples_to_features(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(_lowerCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A = tf.data.Dataset.from_generator(
_lowerCAmelCase , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A (self : Optional[Any] ):
return self.dataset
def __len__(self : int ):
return len(self.features )
def __getitem__(self : Optional[Any] , _lowerCAmelCase : List[Any] ):
return self.features[i]
def A (self : Any ):
return self.label_list
class __UpperCAmelCase ( lowerCamelCase__ ):
'''simple docstring'''
def A (self : Any , _lowerCAmelCase : Dict ):
return self._create_examples(self._read_tsv(os.path.join(_lowerCAmelCase , """heuristics_train_set.txt""" ) ) , """train""" )
def A (self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
return self._create_examples(self._read_tsv(os.path.join(_lowerCAmelCase , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def A (self : Union[str, Any] ):
return ["contradiction", "entailment", "neutral"]
def A (self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
A = []
for i, line in enumerate(_lowerCAmelCase ):
if i == 0:
continue
A = "%s-%s" % (set_type, line[0])
A = line[5]
A = line[6]
A = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
A = line[0]
examples.append(InputExample(guid=_lowerCAmelCase , text_a=_lowerCAmelCase , text_b=_lowerCAmelCase , label=_lowerCAmelCase , pairID=_lowerCAmelCase ) )
return examples
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->Optional[int]:
"""simple docstring"""
A = {label: i for i, label in enumerate(_lowerCAmelCase )}
A = []
for ex_index, example in tqdm.tqdm(enumerate(_lowerCAmelCase ) , desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
A = tokenizer(
example.text_a , example.text_b , add_special_tokens=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" , truncation=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , )
A = label_map[example.label] if example.label in label_map else 0
A = int(example.pairID )
features.append(InputFeatures(**_lowerCAmelCase , label=_lowerCAmelCase , pairID=_lowerCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
_lowerCamelCase : Any = {
'''hans''': 3,
}
_lowerCamelCase : int = {
'''hans''': HansProcessor,
}
| 354 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''yolos'''
def __init__(self : Tuple , _lowerCAmelCase : List[Any]=768 , _lowerCAmelCase : str=12 , _lowerCAmelCase : Tuple=12 , _lowerCAmelCase : Optional[int]=3072 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Optional[Any]=1e-12 , _lowerCAmelCase : Optional[Any]=[512, 864] , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[int]=100 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Any=0.1 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def A (self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A (self : Any ):
return 1e-4
@property
def A (self : int ):
return 12
| 337 | 0 |
def __a ( UpperCAmelCase = 1000 ) ->int:
"""simple docstring"""
A = 2**power
A = 0
while n:
A , A = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 355 |
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase ) ->list[int]:
"""simple docstring"""
return [ord(UpperCAmelCase ) - 96 for elem in plain]
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def __a ( ) ->None:
"""simple docstring"""
A = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , UpperCAmelCase )
print("""Decoded:""" , decode(UpperCAmelCase ) )
if __name__ == "__main__":
main()
| 337 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
def __init__(self : Tuple , _lowerCAmelCase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
A = nn.ModuleList(lowercase_ )
def A (self : Any , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : Union[torch.Tensor, float, int] , _lowerCAmelCase : torch.Tensor , _lowerCAmelCase : List[torch.tensor] , _lowerCAmelCase : List[float] , _lowerCAmelCase : Optional[torch.Tensor] = None , _lowerCAmelCase : Optional[torch.Tensor] = None , _lowerCAmelCase : Optional[torch.Tensor] = None , _lowerCAmelCase : Optional[Dict[str, Any]] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , ):
for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
A , A = controlnet(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
# merge samples
if i == 0:
A , A = down_samples, mid_sample
else:
A = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A (self : Any , _lowerCAmelCase : Union[str, os.PathLike] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Callable = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[str] = None , ):
A = 0
A = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )
idx += 1
A = model_path_to_save + F"""_{idx}"""
@classmethod
def A (cls : Dict , _lowerCAmelCase : Optional[Union[str, os.PathLike]] , **_lowerCAmelCase : Any ):
A = 0
A = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
A = pretrained_model_path
while os.path.isdir(lowercase_ ):
A = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
controlnets.append(lowercase_ )
idx += 1
A = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowercase_ ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(lowercase_ )
| 356 |
'''simple docstring'''
import os
def __a ( ) ->List[Any]:
"""simple docstring"""
A = os.path.join(os.path.dirname(UpperCAmelCase ) , """num.txt""" )
with open(UpperCAmelCase ) as file_hand:
return str(sum(int(UpperCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 337 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
A = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
A = 4
A = 48
A = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
A = [6, 6, 6, 6]
A = 60
A = [6, 6, 6, 6]
A = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
A = 4
A = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
A = 1
A = 1
A = 126
A = 7
A = 255.0
A = """"""
return config
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
A = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
A = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
A = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
A = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
A = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
A = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
A = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
A = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
A = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
A = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
A = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
A = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
A = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
A = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
A = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
A = """layernorm.weight"""
if name == "norm.bias":
A = """layernorm.bias"""
if "conv_first" in name:
A = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
A = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
A = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
A = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
A = name.replace("""upsample.2""" , """upsample.convolution_1""" )
A = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
A = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
A = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
A = """swin2sr.""" + name
return name
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
A = key.split(""".""" )
A = int(key_split[1] )
A = int(key_split[4] )
A = config.embed_dim
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[dim : dim * 2]
A = val[-dim:]
pass
else:
A = val
return orig_state_dict
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
A = get_config(lowercase_ )
A = SwinaSRForImageSuperResolution(lowercase_ )
model.eval()
A = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" )
A = convert_state_dict(lowercase_ , lowercase_ )
A , A = model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(lowercase_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"""Unexpected key {key} in state_dict""" )
# verify values
A = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
A = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" )
A = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
A = 126 if """Jpeg""" in checkpoint_url else 256
A = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
A = transforms(lowercase_ ).unsqueeze(0 )
if config.num_channels == 1:
A = pixel_values[:, 0, :, :].unsqueeze(1 )
A = model(lowercase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
A = torch.Size([1, 3, 512, 512] )
A = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
A = torch.Size([1, 3, 1024, 1024] )
A = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
A = torch.Size([1, 3, 1024, 1024] )
A = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
A = torch.Size([1, 3, 512, 512] )
A = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
A = torch.Size([1, 3, 1024, 1024] )
A = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 )
print("""Looks ok!""" )
A = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
A = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub(f"""caidas/{model_name}""" )
processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
_lowerCamelCase : List[str] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 357 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __a ( UpperCAmelCase ) ->List[int]:
"""simple docstring"""
if isinstance(UpperCAmelCase , np.ndarray ):
return list(tensor.shape )
A = tf.shape(UpperCAmelCase )
if tensor.shape == tf.TensorShape(UpperCAmelCase ):
return dynamic
A = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase )]
def __a ( UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase , name=UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase=-1 ) ->str:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
A , A = tf.nn.moments(UpperCAmelCase , axes=[axis] , keepdims=UpperCAmelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
A = [1] * inputs.shape.rank
A = shape_list(UpperCAmelCase )[axis]
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
# Compute layer normalization using the batch_normalization
# function.
A = tf.nn.batch_normalization(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , offset=UpperCAmelCase , scale=UpperCAmelCase , variance_epsilon=UpperCAmelCase , )
return outputs
def __a ( UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=-1 ) ->int:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
A = tf.shape(UpperCAmelCase )
A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
def __a ( UpperCAmelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(UpperCAmelCase , tf.Tensor ):
A = tf.convert_to_tensor(UpperCAmelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
A = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
A = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
UpperCAmelCase , tf.cast(UpperCAmelCase , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
A = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
A = [x for x in data if len(UpperCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
A = np.asarray(UpperCAmelCase )
A = 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCAmelCase ):
A = chunk_data
else:
A = data
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if name in group.attrs:
A = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs[name]]
else:
A = []
A = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
def _expand_single_ad_tensor(UpperCAmelCase ):
if isinstance(UpperCAmelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCAmelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase )
| 337 | 0 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = [False] * len(A_ )
A = []
queue.append(A_ )
A = True
while queue:
A = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(A_ )
A = True
A = u
return visited[t]
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = [-1] * (len(A_ ))
A = 0
while bfs(A_ , A_ , A_ , A_ ):
A = float("""Inf""" )
A = sink
while s != source:
# Find the minimum value in select path
A = min(A_ , graph[parent[s]][s] )
A = parent[s]
max_flow += path_flow
A = sink
while v != source:
A = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
A = parent[v]
return max_flow
_lowerCamelCase : Optional[int] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_lowerCamelCase : Tuple = 0, 5
print(ford_fulkerson(graph, source, sink))
| 358 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_lowerCamelCase : Any = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
A = primes[group]["""prime"""]
A = primes[group]["""generator"""]
A = int(hexlify(urandom(32 ) ) , base=16 )
def A (self : Optional[Any] ):
return hex(self.__private_key )[2:]
def A (self : Union[str, Any] ):
A = pow(self.generator , self.__private_key , self.prime )
return hex(_lowerCAmelCase )[2:]
def A (self : Any , _lowerCAmelCase : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(_lowerCAmelCase , (self.prime - 1) // 2 , self.prime ) == 1
)
def A (self : List[str] , _lowerCAmelCase : str ):
A = int(_lowerCAmelCase , base=16 )
if not self.is_valid_public_key(_lowerCAmelCase ):
raise ValueError("""Invalid public key""" )
A = pow(_lowerCAmelCase , self.__private_key , self.prime )
return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest()
@staticmethod
def A (_lowerCAmelCase : int , _lowerCAmelCase : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(_lowerCAmelCase , (prime - 1) // 2 , _lowerCAmelCase ) == 1
)
@staticmethod
def A (_lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : int = 14 ):
A = int(_lowerCAmelCase , base=16 )
A = int(_lowerCAmelCase , base=16 )
A = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("""Invalid public key""" )
A = pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowerCamelCase : str = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[Any] ):
super().__init__(*__A , **__A )
requires_backends(self , """decord""" )
self.check_model_type(__A )
def A (self : Union[str, Any] , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[str]=None ):
A = {}
if frame_sampling_rate is not None:
A = frame_sampling_rate
if num_frames is not None:
A = num_frames
A = {}
if top_k is not None:
A = top_k
return preprocess_params, {}, postprocess_params
def __call__(self : Union[str, Any] , _lowerCAmelCase : Union[str, List[str]] , **_lowerCAmelCase : Any ):
return super().__call__(__A , **__A )
def A (self : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=1 ):
if num_frames is None:
A = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
A = BytesIO(requests.get(__A ).content )
A = VideoReader(__A )
videoreader.seek(0 )
A = 0
A = num_frames * frame_sampling_rate - 1
A = np.linspace(__A , __A , num=__A , dtype=np.intaa )
A = videoreader.get_batch(__A ).asnumpy()
A = list(__A )
A = self.image_processor(__A , return_tensors=self.framework )
return model_inputs
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] ):
A = self.model(**__A )
return model_outputs
def A (self : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int]=5 ):
if top_k > self.model.config.num_labels:
A = self.model.config.num_labels
if self.framework == "pt":
A = model_outputs.logits.softmax(-1 )[0]
A , A = probs.topk(__A )
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
A = scores.tolist()
A = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__A , __A )]
| 359 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
else:
return a * actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(UpperCAmelCase , UpperCAmelCase )
return actual_power(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
| 337 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=13 , _lowerCAmelCase : Union[str, Any]=7 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : str=True , _lowerCAmelCase : Optional[Any]=99 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=37 , _lowerCAmelCase : int="gelu" , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : str=512 , _lowerCAmelCase : Any=16 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Union[str, Any]=4 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_attention_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_choices
def A (self : str ):
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_attention_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A (self : int ):
A = self.prepare_config_and_inputs()
A = config_and_inputs
A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A (self : Any ):
A = FlaxRoFormerModelTester(self )
@slow
def A (self : Dict ):
for model_class_name in self.all_model_classes:
A = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_SCREAMING_SNAKE_CASE )
A = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : List[Any] ):
A = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
A = jnp.array([[0, 1, 2, 3, 4, 5]] )
A = model(_SCREAMING_SNAKE_CASE )[0]
A = 5_0000
A = (1, 6, vocab_size)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
A = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 360 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
if isinstance(UpperCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __UpperCAmelCase :
'''simple docstring'''
def A (self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
pass
def A (self : List[str] ):
pass
def A (self : Union[str, Any] ):
pass
def A (self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ):
A = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = {"""vision_model""": vision_model, """text_model""": text_model}
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = after_output[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def A (self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ):
A = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def A (self : List[str] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def A (self : Optional[int] ):
A = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def A (self : List[Any] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def A (self : Tuple ):
A , A = self.get_pretrained_model_and_inputs()
A = model_a(**_lowerCAmelCase )
A = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model_a(**_lowerCAmelCase )
A = after_outputs[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : int ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
A = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Union[str, Any] ):
A = TFViTModelTester(self )
A = TFBertModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : str ):
A = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : str ):
A = TFDeiTModelTester(self )
A = TFRobertaModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Dict ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Optional[Any] ):
A = TFCLIPVisionModelTester(self )
A = TFBertModelTester(self )
A = clip_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : Any ):
A = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
A = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
A = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 100 , ) ->Any:
"""simple docstring"""
A = x_start
A = fnc(_a )
A = 0.0
for _ in range(_a ):
# Approximates curve as a sequence of linear lines and sums their length
A = (x_end - x_start) / steps + xa
A = fnc(_a )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
A = xa
A = fxa
return length
if __name__ == "__main__":
def __a ( UpperCAmelCase ) ->Any:
"""simple docstring"""
return math.sin(10 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
_lowerCamelCase : Dict = 10
while i <= 10_0000:
print(f"With {i} steps: {line_length(f, -10, 10, i)}")
i *= 10
| 361 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Any = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : List[str] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_lowerCamelCase : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_lowerCamelCase : Dict = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCamelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCamelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCamelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCamelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_MAPPING
_lowerCamelCase : Optional[Any] = auto_class_update(FlaxAutoModel)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCamelCase : str = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowerCamelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 337 | 0 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 362 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def A (self : Any ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def A (self : List[str] ):
pass
@slow
@require_torch
def A (self : int ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
A = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
A = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def A (self : Tuple ):
pass
| 337 | 0 |
'''simple docstring'''
def __a ( UpperCAmelCase = 100 ) ->int:
"""simple docstring"""
A = n * (n + 1) * (2 * n + 1) / 6
A = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"{solution() = }")
| 363 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCamelCase : Dict = 'src/diffusers'
_lowerCamelCase : Dict = '.'
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCamelCase : List[str] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCamelCase : Tuple = spec.loader.load_module()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCAmelCase ) is not None
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = object_name.split(""".""" )
A = 0
# First let's find the module where our object lives.
A = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , f"""{module}.py""" ) ):
i += 1
if i < len(UpperCAmelCase ):
A = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(UpperCAmelCase , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
# Now let's find the class / func in the code!
A = """"""
A = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
return "".join(UpperCAmelCase )
_lowerCamelCase : str = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_lowerCamelCase : Any = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_lowerCamelCase : str = re.compile(R'<FILL\s+[^>]*>')
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = code.split("""\n""" )
A = 0
while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = len(get_indent(UpperCAmelCase ) ) > 0
if has_indent:
A = f"""class Bla:\n{code}"""
A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase )
A = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
A , A = style_docstrings_in_code(UpperCAmelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
with open(UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
A = []
A = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase ):
A = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A = search.groups()
A = find_code_in_diffusers(UpperCAmelCase )
A = get_indent(UpperCAmelCase )
A = line_index + 1 if indent == theoretical_indent else line_index + 2
A = theoretical_indent
A = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A = True
while line_index < len(UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase ):
break
A = lines[line_index]
A = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(f"""^{indent}# End copy""" , UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
A = """""".join(UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
A = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase ) is None]
A = """\n""".join(UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase ) > 0:
A = replace_pattern.replace("""with""" , """""" ).split(""",""" )
A = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A = pattern.groups()
A = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if option.strip() == "all-casing":
A = re.sub(obja.lower() , obja.lower() , UpperCAmelCase )
A = re.sub(obja.upper() , obja.upper() , UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A = blackify(lines[start_index - 1] + theoretical_code )
A = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A = lines[:start_index] + [theoretical_code] + lines[line_index:]
A = start_index + 1
if overwrite and len(UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase )
return diffs
def __a ( UpperCAmelCase = False ) ->int:
"""simple docstring"""
A = glob.glob(os.path.join(UpperCAmelCase , """**/*.py""" ) , recursive=UpperCAmelCase )
A = []
for filename in all_files:
A = is_copy_consistent(UpperCAmelCase , UpperCAmelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(UpperCAmelCase ) > 0:
A = """\n""".join(UpperCAmelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCamelCase : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 337 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for param in module.parameters():
A = False
def __a ( ) ->Tuple:
"""simple docstring"""
A = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
A = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues"""
""" with generations.""" )
return device
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = plt.imshow(UpperCAmelCase )
fig.axes.get_xaxis().set_visible(UpperCAmelCase )
fig.axes.get_yaxis().set_visible(UpperCAmelCase )
plt.show()
def __a ( ) ->Any:
"""simple docstring"""
A = datetime.now()
A = current_time.strftime("""%H:%M:%S""" )
return timestamp
| 364 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = credit_card_number
A = 0
A = len(UpperCAmelCase ) - 2
for i in range(UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
A = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
A = cc_number[:i] + str(UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = f"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(f"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(UpperCAmelCase ) <= 16:
print(f"""{error_message} of its length.""" )
return False
if not validate_initial_digits(UpperCAmelCase ):
print(f"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(UpperCAmelCase ):
print(f"""{error_message} it fails the Luhn check.""" )
return False
print(f"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 337 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TransfoXLTokenizer
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : List[Any] ):
super().setUp()
A = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def A (self : Optional[int] , **_lowerCAmelCase : Optional[Any] ):
A = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def A (self : str , _lowerCAmelCase : str ):
A = """<unk> UNwanted , running"""
A = """<unk> unwanted, running"""
return input_text, output_text
def A (self : int ):
A = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowerCAmelCase_ )
A = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(lowerCAmelCase_ , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [0, 4, 8, 7] )
def A (self : Tuple ):
A = TransfoXLTokenizer(lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def A (self : Dict ):
A = TransfoXLTokenizer(lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def A (self : Any ):
A = TransfoXLTokenizer(lower_case=lowerCAmelCase_ )
A = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
A = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(tokenizer.convert_tokens_to_string(lowerCAmelCase_ ) , lowerCAmelCase_ )
def A (self : List[str] ):
A = self.get_tokenizer()
A = len(lowerCAmelCase_ )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(lowerCAmelCase_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 365 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : List[Any] ):
A = str(id_ )
A = None
A = None
A = []
A = {} # {vertex:distance}
def __lt__(self : List[Any] , _lowerCAmelCase : Tuple ):
return self.key < other.key
def __repr__(self : str ):
return self.id
def A (self : Union[str, Any] , _lowerCAmelCase : List[str] ):
self.neighbors.append(_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
A = weight
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , UpperCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->list:
"""simple docstring"""
A = []
for u in graph:
A = math.inf
A = None
A = 0
A = graph[:]
while q:
A = min(UpperCAmelCase )
q.remove(UpperCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
for i in range(1 , len(UpperCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Iterator[tuple]:
"""simple docstring"""
for u in graph:
A = math.inf
A = None
A = 0
A = list(UpperCAmelCase )
hq.heapify(UpperCAmelCase )
while h:
A = hq.heappop(UpperCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
hq.heapify(UpperCAmelCase )
for i in range(1 , len(UpperCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __a ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = PegasusConfig
__lowerCAmelCase = {}
__lowerCAmelCase = '''gelu'''
def __init__(self : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str=13 , _lowerCAmelCase : Optional[int]=7 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Dict=99 , _lowerCAmelCase : Optional[Any]=32 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : Optional[Any]=37 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Dict=40 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Any=0 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = eos_token_id
A = pad_token_id
A = bos_token_id
def A (self : Union[str, Any] ):
A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A = tf.concat([input_ids, eos_tensor] , axis=1 )
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A = prepare_pegasus_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return config, inputs_dict
def A (self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
A = TFPegasusModel(config=SCREAMING_SNAKE_CASE_ ).get_decoder()
A = inputs_dict["""input_ids"""]
A = input_ids[:1, :]
A = inputs_dict["""attention_mask"""][:1, :]
A = inputs_dict["""head_mask"""]
A = 1
# first forward pass
A = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) , config.vocab_size )
A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A = tf.concat([input_ids, next_tokens] , axis=-1 )
A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
A = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A = output_from_no_past[:, -3:, random_slice_idx]
A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-3 )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) ->str:
"""simple docstring"""
if attention_mask is None:
A = tf.cast(tf.math.not_equal(lowerCAmelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__lowerCAmelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__lowerCAmelCase = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : List[Any] ):
A = TFPegasusModelTester(self )
A = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def A (self : Optional[int] ):
self.config_tester.run_common_tests()
def A (self : int ):
A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__lowerCAmelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__lowerCAmelCase = '''google/pegasus-xsum'''
@cached_property
def A (self : Optional[int] ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def A (self : str ):
A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def A (self : Optional[int] , **_lowerCAmelCase : Union[str, Any] ):
A = self.translate_src_text(**SCREAMING_SNAKE_CASE_ )
assert self.expected_text == generated_words
def A (self : Optional[Any] , **_lowerCAmelCase : List[str] ):
A = self.tokenizer(self.src_text , **SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors="""tf""" )
A = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=SCREAMING_SNAKE_CASE_ , )
A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
return generated_words
@slow
def A (self : int ):
self._assert_generated_batch_equal_expected()
| 366 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''perceiver'''
def __init__(self : Dict , _lowerCAmelCase : List[str]=256 , _lowerCAmelCase : Any=1280 , _lowerCAmelCase : Dict=768 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Optional[int]=26 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]="kv" , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Any=1e-12 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : int=262 , _lowerCAmelCase : int=2048 , _lowerCAmelCase : int=56 , _lowerCAmelCase : List[Any]=[368, 496] , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : Any=1920 , _lowerCAmelCase : Optional[int]=16 , _lowerCAmelCase : List[Any]=[1, 16, 224, 224] , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
A = num_latents
A = d_latents
A = d_model
A = num_blocks
A = num_self_attends_per_block
A = num_self_attention_heads
A = num_cross_attention_heads
A = qk_channels
A = v_channels
A = cross_attention_shape_for_attention
A = self_attention_widening_factor
A = cross_attention_widening_factor
A = hidden_act
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = use_query_residual
# masked language modeling attributes
A = vocab_size
A = max_position_embeddings
# image classification attributes
A = image_size
# flow attributes
A = train_size
# multimodal autoencoding attributes
A = num_frames
A = audio_samples_per_frame
A = samples_per_patch
A = output_shape
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
def A (self : List[str] ):
if self.task == "multiple-choice":
A = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def A (self : Dict ):
return 1e-4
def A (self : List[Any] , _lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = preprocessor.num_special_tokens_to_add(_lowerCAmelCase )
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
A = [""" """.join(["""a"""] ) * seq_length] * batch_size
A = dict(preprocessor(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""input_ids""" )
return inputs
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
A = self._generate_dummy_images(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A = dict(preprocessor(images=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 337 | 0 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_lowerCamelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Union[str, Any] , **_lowerCAmelCase : Tuple ):
super().__init__(**UpperCamelCase__ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(UpperCamelCase__ )
def A (self : Optional[int] , **_lowerCAmelCase : List[str] ):
A = {}
A = {}
A = {}
# preprocess args
if "points_per_batch" in kwargs:
A = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
A = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
A = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
A = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
A = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
A = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
A = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
A = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
A = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
A = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
A = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
A = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self : Tuple , _lowerCAmelCase : Optional[Any] , *_lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : str=None , **_lowerCAmelCase : str ):
return super().__call__(UpperCamelCase__ , *UpperCamelCase__ , num_workers=UpperCamelCase__ , batch_size=UpperCamelCase__ , **UpperCamelCase__ )
def A (self : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=64 , _lowerCAmelCase : int = 0 , _lowerCAmelCase : float = 512 / 1500 , _lowerCAmelCase : Optional[int] = 32 , _lowerCAmelCase : Optional[int] = 1 , ):
A = load_image(UpperCamelCase__ )
A = self.image_processor.size['''longest_edge''']
A = self.image_processor.generate_crop_boxes(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = self.image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A = self.get_inference_context()
with inference_context():
A = self._ensure_tensor_on_device(UpperCamelCase__ , device=self.device )
A = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A = image_embeddings
A = grid_points.shape[1]
A = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
A = grid_points[:, i : i + points_per_batch, :, :]
A = input_labels[:, i : i + points_per_batch]
A = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def A (self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any]=0.88 , _lowerCAmelCase : Optional[Any]=0.95 , _lowerCAmelCase : List[Any]=0 , _lowerCAmelCase : Optional[int]=1 , ):
A = model_inputs.pop("""input_boxes""" )
A = model_inputs.pop("""is_last""" )
A = model_inputs.pop("""original_sizes""" ).tolist()
A = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A = self.model(**UpperCamelCase__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A = model_outputs['''pred_masks''']
A = self.image_processor.post_process_masks(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , binarize=UpperCamelCase__ )
A = model_outputs['''iou_scores''']
A = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Any=0.7 , ):
A = []
A = []
A = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A = torch.cat(UpperCamelCase__ )
A = torch.cat(UpperCamelCase__ )
A = self.image_processor.post_process_for_mask_generation(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = defaultdict(UpperCamelCase__ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCamelCase__ )
A = {}
if output_rle_mask:
A = rle_mask
if output_bboxes_mask:
A = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 367 |
'''simple docstring'''
import math
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
A = n
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # adjacency matrix for weight
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
A = w
def A (self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def A (self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
def __a ( ) ->Tuple:
"""simple docstring"""
A = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=lowerCamelCase_ , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=lowerCamelCase_ , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=lowerCamelCase_ , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=lowerCamelCase_ , default="""data/dump""" , help="""The dump file prefix.""" )
A = parser.parse_args()
logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
A = BertTokenizer.from_pretrained(args.tokenizer_name )
A = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
A = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
A = RobertaTokenizer.from_pretrained(args.tokenizer_name )
A = tokenizer.special_tokens_map['cls_token'] # `<s>`
A = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
A = GPTaTokenizer.from_pretrained(args.tokenizer_name )
A = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
A = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f"""Loading text from {args.file_path}""" )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
A = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f"""{len(lowerCamelCase_ )} examples to process.""" )
A = []
A = 0
A = 10000
A = time.time()
for text in data:
A = f"""{bos} {text.strip()} {sep}"""
A = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
rslt.append(lowerCamelCase_ )
iter += 1
if iter % interval == 0:
A = time.time()
logger.info(f"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
A = time.time()
logger.info("""Finished binarization""" )
logger.info(f"""{len(lowerCamelCase_ )} examples processed.""" )
A = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
A = tokenizer.vocab_size
if vocab_size < (1 << 16):
A = [np.uintaa(lowerCamelCase_ ) for d in rslt]
else:
A = [np.intaa(lowerCamelCase_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"""Dump to {dp_file}""" )
with open(lowerCamelCase_ , """wb""" ) as handle:
pickle.dump(rslt_ , lowerCamelCase_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 368 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : List[str] = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : List[str] = {
'Salesforce/codegen-350M-mono': 2048,
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = CodeGenTokenizer
def __init__(self : int , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[Any]="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Any=False , **_lowerCAmelCase : Optional[int] , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
if kwargs.pop("""add_bos_token""" , _lowerCAmelCase ):
A = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
A = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
A = add_prefix_space
A = pre_tok_class(**_lowerCAmelCase )
A = add_prefix_space
def A (self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def A (self : Tuple , _lowerCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[List[str]] = None , **_lowerCAmelCase : Tuple , ):
A = super().decode(
token_ids=_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase , **_lowerCAmelCase , )
if truncate_before_pattern is not None and len(_lowerCAmelCase ) > 0:
A = self.truncate(_lowerCAmelCase , _lowerCAmelCase )
return decoded_text
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
def find_re(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
A = pattern.search(_lowerCAmelCase , _lowerCAmelCase )
return m.start() if m else -1
A = [re.compile(_lowerCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
A = list(re.finditer("""^print""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: prints[1].start()]
A = list(re.finditer("""^def""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: defs[1].start()]
A = 0
A = [
pos for pos in [find_re(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for terminal in terminals] if pos != -1
]
if len(_lowerCAmelCase ) > 0:
return completion[: min(_lowerCAmelCase )]
else:
return completion
| 337 | 0 |
def __a ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if number > 0:
raise ValueError("""input must be a negative integer""" )
A = len(bin(__UpperCamelCase )[3:] )
A = bin(abs(__UpperCamelCase ) - (1 << binary_number_length) )[3:]
A = (
(
"""1"""
+ """0""" * (binary_number_length - len(__UpperCamelCase ))
+ twos_complement_number
)
if number < 0
else """0"""
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Optional[Any] = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 337 | 0 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
_lowerCamelCase : int = 'us-east-1' # defaults region
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
__lowerCAmelCase = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_00,
"save_steps": 55_00,
}
__lowerCAmelCase = {**hyperparameters, "max_steps": 10_00}
@property
def A (self : Any ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def A (self : Optional[Any] ):
return F"""{self.framework}-transfromers-test"""
@property
def A (self : Dict ):
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def A (self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __a ( UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
A = SageMakerTestEnvironment(framework=request.cls.framework )
| 370 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 337 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
_lowerCamelCase : Dict = True
except (ImportError, AttributeError):
_lowerCamelCase : Tuple = object
def __a ( *UpperCAmelCase , **UpperCAmelCase ) ->Any:
"""simple docstring"""
pass
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Union[str, Any] = logging.get_logger('transformers-cli/serving')
def __a ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
A = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(a__ , args.host , args.port , args.workers )
class __UpperCAmelCase ( _a ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( _a ):
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class __UpperCAmelCase ( _a ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( _a ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( _a ):
'''simple docstring'''
@staticmethod
def A (_lowerCAmelCase : List[str] ):
A = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=__lowerCAmelCase , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=__lowerCAmelCase , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=__lowerCAmelCase , default=8888 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=__lowerCAmelCase , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=__lowerCAmelCase , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=__lowerCAmelCase , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=__lowerCAmelCase , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=__lowerCAmelCase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=__lowerCAmelCase )
def __init__(self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
A = pipeline
A = host
A = port
A = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(F"""Serving model over {host}:{port}""" )
A = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=__lowerCAmelCase , response_class=__lowerCAmelCase , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=__lowerCAmelCase , response_class=__lowerCAmelCase , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=__lowerCAmelCase , response_class=__lowerCAmelCase , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=__lowerCAmelCase , response_class=__lowerCAmelCase , methods=["""POST"""] , ),
] , timeout=600 , )
def A (self : str ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def A (self : List[Any] ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def A (self : Any , _lowerCAmelCase : Union[str, Any] = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) , _lowerCAmelCase : List[Any] = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) ):
try:
A = self._pipeline.tokenizer.tokenize(__lowerCAmelCase )
if return_ids:
A = self._pipeline.tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
return ServeTokenizeResult(tokens=__lowerCAmelCase , tokens_ids=__lowerCAmelCase )
else:
return ServeTokenizeResult(tokens=__lowerCAmelCase )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(__lowerCAmelCase )} )
def A (self : Any , _lowerCAmelCase : Union[str, Any] = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) , _lowerCAmelCase : Optional[int] = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) , _lowerCAmelCase : Dict = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) , ):
try:
A = self._pipeline.tokenizer.decode(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return ServeDeTokenizeResult(model="""""" , text=__lowerCAmelCase )
except Exception as e:
raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(__lowerCAmelCase )} )
async def A (self : Optional[int] , _lowerCAmelCase : int=Body(__lowerCAmelCase , embed=__lowerCAmelCase ) ):
# Check we don't have empty string
if len(__lowerCAmelCase ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
A = self._pipeline(__lowerCAmelCase )
return ServeForwardResult(output=__lowerCAmelCase )
except Exception as e:
raise HTTPException(500 , {"""error""": str(__lowerCAmelCase )} )
| 371 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Tuple , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Dict ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Any ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Dict ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=A__ ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 337 | 0 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(__a ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(__a ) == 1:
return True
A = series[1] - series[0]
for index in range(len(__a ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(__a ) == 0:
raise ValueError("""Input list must be a non empty list""" )
A = 0
for val in series:
answer += val
return answer / len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __a ( ) ->str:
"""simple docstring"""
A = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=UpperCAmelCase , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=UpperCAmelCase , default=5 )
parser.add_argument("""--batch_size""" , type=UpperCAmelCase , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=UpperCAmelCase , default=1 )
parser.add_argument("""--freeze""" , type=UpperCAmelCase , default=UpperCAmelCase )
parser.add_argument("""--learning_rate""" , type=UpperCAmelCase , default=5E-4 )
parser.add_argument("""--seed""" , type=UpperCAmelCase , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=UpperCAmelCase , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=UpperCAmelCase , default=10 )
parser.add_argument("""--weight_decay""" , type=UpperCAmelCase , default=0.01 )
parser.add_argument("""--output_dir""" , type=UpperCAmelCase , default="""./results""" )
return parser.parse_args()
_lowerCamelCase : Optional[Any] = load('accuracy')
def __a ( UpperCAmelCase ) ->Any:
"""simple docstring"""
A , A = eval_pred
A = np.argmax(UpperCAmelCase , axis=1 )
return metric.compute(predictions=UpperCAmelCase , references=UpperCAmelCase )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Any ):
super().__init__()
A = trainer
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , **_lowerCAmelCase : List[Any] ):
if control.should_evaluate:
A = deepcopy(_lowerCAmelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def __a ( ) ->Optional[int]:
"""simple docstring"""
A = get_args()
set_seed(args.seed )
A = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
A = dataset.train_test_split(test_size=0.2 )
A = train_test["""test"""].train_test_split(test_size=0.5 )
A = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
A = AutoTokenizer.from_pretrained(args.model_ckpt )
A = tokenizer.eos_token
A = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
A = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
A = False
A = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(UpperCAmelCase ):
A = tokenizer(example["""src"""] , truncation=UpperCAmelCase , max_length=1024 )
A = labels.straint(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
A = train_test_validation.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=train_test_validation["""train"""].column_names , )
A = DataCollatorWithPadding(tokenizer=UpperCAmelCase )
A = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
A = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=UpperCAmelCase , data_collator=UpperCAmelCase , compute_metrics=UpperCAmelCase , )
print("""Training...""" )
trainer.add_callback(CustomCallback(UpperCAmelCase ) )
trainer.train()
if __name__ == "__main__":
main()
| 337 | 0 |
'''simple docstring'''
from maths.prime_check import is_prime
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
if not isinstance(A__ , A__ ):
A = f"""Input value of [number={number}] must be an integer"""
raise TypeError(A__ )
if is_prime(A__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
_lowerCamelCase : Dict = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
_lowerCamelCase : Optional[Any] = {
'ctrl': 256,
}
_lowerCamelCase : List[str] = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
A = set(UpperCAmelCase )
return pairs
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = CONTROL_CODES
def __init__(self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any]="<unk>" , **_lowerCAmelCase : Dict ):
super().__init__(unk_token=_lowerCAmelCase , **_lowerCAmelCase )
with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
A = json.load(_lowerCAmelCase )
A = {v: k for k, v in self.encoder.items()}
with open(_lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
A = merges_handle.read().split("""\n""" )[1:-1]
A = [tuple(merge.split() ) for merge in merges]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = {}
@property
def A (self : Tuple ):
return len(self.encoder )
def A (self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def A (self : Optional[int] , _lowerCAmelCase : Optional[int] ):
if token in self.cache:
return self.cache[token]
A = tuple(_lowerCAmelCase )
A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
A = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(_lowerCAmelCase ):
try:
A = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(_lowerCAmelCase )
A = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
A = get_pairs(_lowerCAmelCase )
A = """@@ """.join(_lowerCAmelCase )
A = word[:-4]
A = word
return word
def A (self : List[str] , _lowerCAmelCase : Dict ):
A = []
A = re.findall(r"""\S+\n?""" , _lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def A (self : str , _lowerCAmelCase : int ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def A (self : Dict , _lowerCAmelCase : str ):
return self.decoder.get(_lowerCAmelCase , self.unk_token )
def A (self : List[str] , _lowerCAmelCase : List[Any] ):
A = """ """.join(_lowerCAmelCase ).replace("""@@ """ , """""" ).strip()
return out_string
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" )
A = 0
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
A = token_index
writer.write(""" """.join(_lowerCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 337 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_lowerCamelCase : Any = 'bert-base-cased'
_lowerCamelCase : Optional[int] = 'google/pegasus-xsum'
_lowerCamelCase : List[str] = [' Sam ate lunch today.', 'Sams lunch ingredients.']
_lowerCamelCase : Any = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
_lowerCamelCase : Union[str, Any] = 'patrickvonplaten/t5-tiny-random'
_lowerCamelCase : Dict = 'sshleifer/bart-tiny-random'
_lowerCamelCase : int = 'sshleifer/tiny-mbart'
_lowerCamelCase : str = 'sshleifer/tiny-marian-en-de'
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = '''\n'''.join(lowerCAmelCase__ )
Path(lowerCAmelCase__ ).open("""w""" ).writelines(lowerCAmelCase__ )
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowerCAmelCase__ , f"""{split}.source""" ) , lowerCAmelCase__ )
_dump_articles(os.path.join(lowerCAmelCase__ , f"""{split}.target""" ) , lowerCAmelCase__ )
return tmp_dir
class __UpperCAmelCase ( __lowercase ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def A (self : int , _lowerCAmelCase : int ):
A = AutoTokenizer.from_pretrained(snake_case_ )
A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
A = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
A = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
A = 4
A = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
A = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
A = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="""train""" , max_source_length=snake_case_ , max_target_length=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , )
A = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(snake_case_ , snake_case_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
A = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def A (self : Optional[Any] , _lowerCAmelCase : str ):
A = AutoTokenizer.from_pretrained(snake_case_ )
A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
A = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
A = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
A = 4
A = LegacySeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="""train""" , max_source_length=20 , max_target_length=snake_case_ , )
A = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def A (self : List[str] ):
A = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
A = tmp_dir.joinpath("""train.source""" ).open().readlines()
A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(snake_case_ , snake_case_ , 128 , snake_case_ )
A = {x.name for x in tmp_dir.iterdir()}
A = {x.name for x in save_dir.iterdir()}
A = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(snake_case_ ) < len(snake_case_ )
assert len(snake_case_ ) == 1
assert len(packed_examples[0] ) == sum(len(snake_case_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def A (self : Any ):
if not FAIRSEQ_AVAILABLE:
return
A = self._get_dataset(max_len=64 )
A = 64
A = ds.make_dynamic_sampler(snake_case_ , required_batch_size_multiple=snake_case_ )
A = [len(snake_case_ ) for x in batch_sampler]
assert len(set(snake_case_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(snake_case_ ) == len(snake_case_ ) # no dropped or added examples
A = DataLoader(snake_case_ , batch_sampler=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
A = []
A = []
for batch in data_loader:
A = batch['''input_ids'''].shape
A = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
A = np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(snake_case_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(snake_case_ )
assert num_src_per_batch[0] == max(snake_case_ )
if failures:
raise AssertionError(F"""too many tokens in {len(snake_case_ )} batches""" )
def A (self : int ):
A = self._get_dataset(max_len=512 )
A = 2
A = ds.make_sortish_sampler(snake_case_ , shuffle=snake_case_ )
A = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
A = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=snake_case_ )
A = tokenizer.pad_token_id
def count_pad_tokens(_lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str]="input_ids" ):
return [batch[k].eq(snake_case_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(snake_case_ , k="""labels""" ) ) < sum(count_pad_tokens(snake_case_ , k="""labels""" ) )
assert sum(count_pad_tokens(snake_case_ ) ) < sum(count_pad_tokens(snake_case_ ) )
assert len(snake_case_ ) == len(snake_case_ )
def A (self : int , _lowerCAmelCase : int=1000 , _lowerCAmelCase : Optional[Any]=128 ):
if os.getenv("""USE_REAL_DATA""" , snake_case_ ):
A = '''examples/seq2seq/wmt_en_ro'''
A = max_len * 2 * 64
if not Path(snake_case_ ).joinpath("""train.len""" ).exists():
save_len_file(snake_case_ , snake_case_ )
else:
A = '''examples/seq2seq/test_data/wmt_en_ro'''
A = max_len * 4
save_len_file(snake_case_ , snake_case_ )
A = AutoTokenizer.from_pretrained(snake_case_ )
A = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="""train""" , max_source_length=snake_case_ , max_target_length=snake_case_ , n_obs=snake_case_ , )
return ds, max_tokens, tokenizer
def A (self : List[str] ):
A = self._get_dataset()
A = set(DistributedSortishSampler(snake_case_ , 256 , num_replicas=2 , rank=0 , add_extra_examples=snake_case_ ) )
A = set(DistributedSortishSampler(snake_case_ , 256 , num_replicas=2 , rank=1 , add_extra_examples=snake_case_ ) )
assert idsa.intersection(snake_case_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def A (self : str , _lowerCAmelCase : Union[str, Any] ):
A = AutoTokenizer.from_pretrained(snake_case_ , use_fast=snake_case_ )
if tok_name == MBART_TINY:
A = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
A = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
A = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
A = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(snake_case_ ) == 1 if tok_name == BART_TINY else len(snake_case_ ) == 0
| 352 |
'''simple docstring'''
_lowerCamelCase : List[Any] = 'Input must be a string of 8 numbers plus letter'
_lowerCamelCase : str = 'TRWAGMYFPDXBNJZSQVHLCKE'
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
A = f"""Expected string as input, found {type(UpperCAmelCase ).__name__}"""
raise TypeError(UpperCAmelCase )
A = spanish_id.replace("""-""" , """""" ).upper()
if len(UpperCAmelCase ) != 9:
raise ValueError(UpperCAmelCase )
try:
A = int(spanish_id_clean[0:8] )
A = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(UpperCAmelCase ) from ex
if letter.isdigit():
raise ValueError(UpperCAmelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
if gpta_config_file == "":
A = GPTaConfig()
else:
A = GPTaConfig.from_json_file(UpperCAmelCase )
A = GPTaModel(UpperCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
A = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
A = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , UpperCAmelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
_lowerCamelCase : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 353 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''umt5'''
__lowerCAmelCase = ['''past_key_values''']
def __init__(self : Dict , _lowerCAmelCase : Optional[int]=25_0112 , _lowerCAmelCase : int=512 , _lowerCAmelCase : Any=64 , _lowerCAmelCase : int=1024 , _lowerCAmelCase : int=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]=6 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : Any=128 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=1e-6 , _lowerCAmelCase : Dict=1.0 , _lowerCAmelCase : Tuple="gated-gelu" , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[int]="T5Tokenizer" , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : str=1 , _lowerCAmelCase : Union[str, Any]=0 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(
is_encoder_decoder=_lowerCAmelCase , tokenizer_class=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
A = vocab_size
A = d_model
A = d_kv
A = d_ff
A = num_layers
A = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A = num_heads
A = relative_attention_num_buckets
A = relative_attention_max_distance
A = dropout_rate
A = layer_norm_epsilon
A = initializer_factor
A = feed_forward_proj
A = use_cache
A = self.feed_forward_proj.split("""-""" )
A = act_info[-1]
A = act_info[0] == """gated"""
if len(_lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
A = """gelu_new"""
@property
def A (self : Optional[Any] ):
return self.d_model
@property
def A (self : List[Any] ):
return self.num_heads
@property
def A (self : Dict ):
return self.num_layers
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def A (self : Optional[Any] ):
A = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
A = """past_encoder_sequence + sequence"""
A = {0: """batch"""}
A = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A = {0: """batch""", 1: """decoder_sequence"""}
A = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def A (self : Union[str, Any] ):
return 13
@property
def A (self : Tuple ):
return 5e-4
| 337 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __a ( ) ->str:
"""simple docstring"""
A = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=UpperCAmelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=UpperCAmelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=UpperCAmelCase )
return parser.parse_args()
def __a ( ) ->str:
"""simple docstring"""
A = parse_args()
# Import training_script as a module.
A = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
A = script_fpath.stem
A = importlib.import_module(UpperCAmelCase )
# Patch sys.argv
A = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 354 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''yolos'''
def __init__(self : Tuple , _lowerCAmelCase : List[Any]=768 , _lowerCAmelCase : str=12 , _lowerCAmelCase : Tuple=12 , _lowerCAmelCase : Optional[int]=3072 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Optional[Any]=1e-12 , _lowerCAmelCase : Optional[Any]=[512, 864] , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[int]=100 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Any=0.1 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def A (self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A (self : Any ):
return 1e-4
@property
def A (self : int ):
return 12
| 337 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Any ):
A = tempfile.mkdtemp()
# fmt: off
A = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
A = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A = {"""unk_token""": """<unk>"""}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase_ ) )
A = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
A = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def A (self : List[Any] , **_lowerCAmelCase : List[str] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def A (self : Tuple , **_lowerCAmelCase : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def A (self : Optional[int] , **_lowerCAmelCase : int ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def A (self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def A (self : Optional[int] ):
A = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A (self : Union[str, Any] ):
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = self.get_image_processor()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
A = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
A = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def A (self : List[str] ):
A = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
A = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def A (self : Union[str, Any] ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = self.prepare_image_inputs()
A = image_processor(UpperCamelCase_ , return_tensors="""np""" )
A = processor(images=UpperCamelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A (self : Optional[Any] ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = """lower newer"""
A = processor(text=UpperCamelCase_ )
A = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A (self : Optional[Any] ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = """lower newer"""
A = self.prepare_image_inputs()
A = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def A (self : Tuple ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A = processor.batch_decode(UpperCamelCase_ )
A = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def A (self : int ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = """lower newer"""
A = self.prepare_image_inputs()
A = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 355 |
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase ) ->list[int]:
"""simple docstring"""
return [ord(UpperCAmelCase ) - 96 for elem in plain]
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def __a ( ) ->None:
"""simple docstring"""
A = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , UpperCAmelCase )
print("""Decoded:""" , decode(UpperCAmelCase ) )
if __name__ == "__main__":
main()
| 337 | 0 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
_lowerCamelCase : Optional[int] = 'facebook/wmt19-en-de'
_lowerCamelCase : int = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
_lowerCamelCase : Optional[int] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
_lowerCamelCase : str = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
_lowerCamelCase : Optional[int] = tokenizer(['Making tiny model'], return_tensors='pt')
_lowerCamelCase : Optional[int] = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
_lowerCamelCase : Union[str, Any] = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 356 |
'''simple docstring'''
import os
def __a ( ) ->List[Any]:
"""simple docstring"""
A = os.path.join(os.path.dirname(UpperCAmelCase ) , """num.txt""" )
with open(UpperCAmelCase ) as file_hand:
return str(sum(int(UpperCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 337 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = """ibert"""
def __init__(self : str , _lowerCAmelCase : List[Any]=3_0522 , _lowerCAmelCase : List[str]=768 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Any=12 , _lowerCAmelCase : Any=3072 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : int=512 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Any=0.02 , _lowerCAmelCase : Dict=1e-12 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : List[Any]=0 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Optional[Any]="absolute" , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Tuple="none" , **_lowerCAmelCase : List[str] , ):
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = position_embedding_type
A = quant_mode
A = force_dequant
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
def A (self : List[str] ):
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 357 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __a ( UpperCAmelCase ) ->List[int]:
"""simple docstring"""
if isinstance(UpperCAmelCase , np.ndarray ):
return list(tensor.shape )
A = tf.shape(UpperCAmelCase )
if tensor.shape == tf.TensorShape(UpperCAmelCase ):
return dynamic
A = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase )]
def __a ( UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase , name=UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase=-1 ) ->str:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
A , A = tf.nn.moments(UpperCAmelCase , axes=[axis] , keepdims=UpperCAmelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
A = [1] * inputs.shape.rank
A = shape_list(UpperCAmelCase )[axis]
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
# Compute layer normalization using the batch_normalization
# function.
A = tf.nn.batch_normalization(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , offset=UpperCAmelCase , scale=UpperCAmelCase , variance_epsilon=UpperCAmelCase , )
return outputs
def __a ( UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=-1 ) ->int:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
A = tf.shape(UpperCAmelCase )
A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
def __a ( UpperCAmelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(UpperCAmelCase , tf.Tensor ):
A = tf.convert_to_tensor(UpperCAmelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
A = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
A = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
UpperCAmelCase , tf.cast(UpperCAmelCase , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
A = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
A = [x for x in data if len(UpperCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
A = np.asarray(UpperCAmelCase )
A = 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCAmelCase ):
A = chunk_data
else:
A = data
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if name in group.attrs:
A = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs[name]]
else:
A = []
A = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
def _expand_single_ad_tensor(UpperCAmelCase ):
if isinstance(UpperCAmelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCAmelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase )
| 337 | 0 |
'''simple docstring'''
_lowerCamelCase : Optional[int] = range(2, 20 + 1)
_lowerCamelCase : str = [10**k for k in range(ks[-1] + 1)]
_lowerCamelCase : dict[int, dict[int, list[list[int]]]] = {}
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
A = sum(a_i[j] for j in range(_A , len(_A ) ) )
A = sum(a_i[j] * base[j] for j in range(min(len(_A ) , _A ) ) )
A = 0, 0
A = n - i
A = memo.get(_A )
if sub_memo is not None:
A = sub_memo.get(_A )
if jumps is not None and len(_A ) > 0:
# find and make the largest jump without going over
A = -1
for _k in range(len(_A ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A = _k
break
if max_jump >= 0:
A = jumps[max_jump]
# since the difference between jumps is cached, add c
A = diff + c
for j in range(min(_A , len(_A ) ) ):
A = divmod(_A , 10 )
if new_c > 0:
add(_A , _A , _A )
else:
A = []
else:
A = {c: []}
A = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A = next_term(_A , k - 1 , i + dn , _A )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A = compute(_A , _A , i + dn , _A )
diff += _diff
dn += terms_jumped
A = sub_memo[c]
# keep jumps sorted by # of terms skipped
A = 0
while j < len(_A ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_A , (diff, dn, k) )
return (diff, dn)
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_A ):
a_i.extend([0 for _ in range(k - len(_A ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A = i
A = 0, 0, 0
for j in range(len(_A ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A = ds_c + ds_b
diff += addend
A = 0
for j in range(_A ):
A = a_i[j] + addend
A = divmod(_A , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_A , _A , _A )
return diff, i - start_i
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
for j in range(_A , len(_A ) ):
A = digits[j] + addend
if s >= 10:
A = divmod(_A , 10 )
A = addend // 10 + quotient
else:
A = s
A = addend // 10
if addend == 0:
break
while addend > 0:
A = divmod(_A , 10 )
digits.append(_A )
def __a ( UpperCAmelCase = 10**15 ) ->Union[str, Any]:
"""simple docstring"""
A = [1]
A = 1
A = 0
while True:
A = next_term(_A , 20 , i + dn , _A )
dn += terms_jumped
if dn == n - i:
break
A = 0
for j in range(len(_A ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"{solution() = }")
| 358 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_lowerCamelCase : Any = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
A = primes[group]["""prime"""]
A = primes[group]["""generator"""]
A = int(hexlify(urandom(32 ) ) , base=16 )
def A (self : Optional[Any] ):
return hex(self.__private_key )[2:]
def A (self : Union[str, Any] ):
A = pow(self.generator , self.__private_key , self.prime )
return hex(_lowerCAmelCase )[2:]
def A (self : Any , _lowerCAmelCase : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(_lowerCAmelCase , (self.prime - 1) // 2 , self.prime ) == 1
)
def A (self : List[str] , _lowerCAmelCase : str ):
A = int(_lowerCAmelCase , base=16 )
if not self.is_valid_public_key(_lowerCAmelCase ):
raise ValueError("""Invalid public key""" )
A = pow(_lowerCAmelCase , self.__private_key , self.prime )
return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest()
@staticmethod
def A (_lowerCAmelCase : int , _lowerCAmelCase : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(_lowerCAmelCase , (prime - 1) // 2 , _lowerCAmelCase ) == 1
)
@staticmethod
def A (_lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : int = 14 ):
A = int(_lowerCAmelCase , base=16 )
A = int(_lowerCAmelCase , base=16 )
A = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("""Invalid public key""" )
A = pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return shaaaa(str(_lowerCAmelCase ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def A (self : Optional[Any] , **_lowerCAmelCase : str ):
A = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**A__ )
return config
def A (self : Tuple ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=A__ )
def A (self : str ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def A (self : List[Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A__ )
def A (self : Optional[Any] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A__ )
def A (self : str ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A__ )
def A (self : Optional[int] ):
self.check_over_configs(thresholding=A__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A__ , prediction_type=A__ , sample_max_value=A__ , )
def A (self : Union[str, Any] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def A (self : int ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=A__ )
def A (self : Any ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def A (self : Dict ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A__ )
A = len(A__ )
A = self.dummy_model()
A = self.dummy_sample_deter
A = torch.manual_seed(0 )
for t in reversed(range(A__ ) ):
# 1. predict noise residual
A = model(A__ , A__ )
# 2. predict previous mean of sample x_t-1
A = scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A = pred_prev_sample
A = torch.sum(torch.abs(A__ ) )
A = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def A (self : Union[str, Any] ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config(prediction_type="""v_prediction""" )
A = scheduler_class(**A__ )
A = len(A__ )
A = self.dummy_model()
A = self.dummy_sample_deter
A = torch.manual_seed(0 )
for t in reversed(range(A__ ) ):
# 1. predict noise residual
A = model(A__ , A__ )
# 2. predict previous mean of sample x_t-1
A = scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A = pred_prev_sample
A = torch.sum(torch.abs(A__ ) )
A = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def A (self : Any ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A__ )
A = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A__ )
A = scheduler.timesteps
for i, timestep in enumerate(A__ ):
if i == len(A__ ) - 1:
A = -1
else:
A = timesteps[i + 1]
A = scheduler.previous_timestep(A__ )
A = prev_t.item()
self.assertEqual(A__ , A__ )
def A (self : Union[str, Any] ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A__ )
A = [100, 87, 50, 51, 0]
with self.assertRaises(A__ , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=A__ )
def A (self : Any ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A__ )
A = [100, 87, 50, 1, 0]
A = len(A__ )
with self.assertRaises(A__ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=A__ , timesteps=A__ )
def A (self : List[Any] ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A__ )
A = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A__ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=A__ )
| 359 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
else:
return a * actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(UpperCAmelCase , UpperCAmelCase )
return actual_power(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
| 337 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : int = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : Union[str, Any] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : Dict = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
_lowerCamelCase : int = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
_lowerCamelCase : List[Any] = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
_lowerCamelCase : Optional[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
_lowerCamelCase : str = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
_lowerCamelCase : int = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __UpperCAmelCase ( __a ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase = DPRContextEncoderTokenizer
class __UpperCAmelCase ( __a ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase = DPRQuestionEncoderTokenizer
_lowerCamelCase : Optional[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
_lowerCamelCase : str = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
_lowerCamelCase : Dict = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__a )
class __UpperCAmelCase :
'''simple docstring'''
def __call__(self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Union[bool, str] = False , _lowerCAmelCase : Union[bool, str] = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : Optional[bool] = None , **_lowerCAmelCase : Union[str, Any] , ):
if titles is None and texts is None:
return super().__call__(
a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors=a__ , return_attention_mask=a__ , **a__ , )
elif titles is None or texts is None:
A = titles if texts is None else texts
return super().__call__(
a__ , a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors=a__ , return_attention_mask=a__ , **a__ , )
A = titles if not isinstance(a__ , a__ ) else [titles]
A = texts if not isinstance(a__ , a__ ) else [texts]
A = len(a__ )
A = questions if not isinstance(a__ , a__ ) else [questions] * n_passages
assert len(a__ ) == len(
a__ ), F"""There should be as many titles than texts but got {len(a__ )} titles and {len(a__ )} texts."""
A = super().__call__(a__ , a__ , padding=a__ , truncation=a__ )["""input_ids"""]
A = super().__call__(a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ )["""input_ids"""]
A = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(a__ , a__ )
]
}
if return_attention_mask is not False:
A = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
A = attention_mask
return self.pad(a__ , padding=a__ , max_length=a__ , return_tensors=a__ )
def A (self : str , _lowerCAmelCase : BatchEncoding , _lowerCAmelCase : DPRReaderOutput , _lowerCAmelCase : int = 16 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 4 , ):
A = reader_input["""input_ids"""]
A , A , A = reader_output[:3]
A = len(a__ )
A = sorted(range(a__ ) , reverse=a__ , key=relevance_logits.__getitem__ )
A = []
for doc_id in sorted_docs:
A = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
A = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A = sequence_ids.index(self.pad_token_id )
else:
A = len(a__ )
A = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=a__ , top_spans=a__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=a__ , start_index=a__ , end_index=a__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(a__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def A (self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , ):
A = []
for start_index, start_score in enumerate(a__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
A = sorted(a__ , key=lambda _lowerCAmelCase : x[1] , reverse=a__ )
A = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
A = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(a__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__a )
class __UpperCAmelCase ( __a , __a ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase = DPRReaderTokenizer
| 360 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
if isinstance(UpperCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __UpperCAmelCase :
'''simple docstring'''
def A (self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
pass
def A (self : List[str] ):
pass
def A (self : Union[str, Any] ):
pass
def A (self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ):
A = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = {"""vision_model""": vision_model, """text_model""": text_model}
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A (self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
A = after_output[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def A (self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any=None , **_lowerCAmelCase : List[Any] ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ):
A = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def A (self : List[str] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def A (self : Optional[int] ):
A = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def A (self : List[Any] ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def A (self : int ):
A = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def A (self : Tuple ):
A , A = self.get_pretrained_model_and_inputs()
A = model_a(**_lowerCAmelCase )
A = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
A = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
A = model_a(**_lowerCAmelCase )
A = after_outputs[0].numpy()
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : int ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
A = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Union[str, Any] ):
A = TFViTModelTester(self )
A = TFBertModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : Any ):
A , A = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
A = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
A = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : str ):
A = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
A = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : str ):
A = TFDeiTModelTester(self )
A = TFRobertaModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
def A (self : Dict ):
A = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
A = 13
A = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
A = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def A (self : Optional[Any] ):
A = TFCLIPVisionModelTester(self )
A = TFBertModelTester(self )
A = clip_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A , A = vision_config_and_inputs
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A (self : Any ):
A = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
A = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
A = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase ) ->list[int]:
"""simple docstring"""
A = [True] * limit
A = False
A = False
A = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
A = i * 2
while index < limit:
A = False
A = index + i
A = [2]
for i in range(3 , UpperCAmelCase , 2 ):
if is_prime[i]:
primes.append(UpperCAmelCase )
return primes
def __a ( UpperCAmelCase = 1000000 ) ->int:
"""simple docstring"""
A = prime_sieve(UpperCAmelCase )
A = 0
A = 0
for i in range(len(UpperCAmelCase ) ):
for j in range(i + length , len(UpperCAmelCase ) ):
A = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
A = j - i
A = sol
return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 361 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Any = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : List[str] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_lowerCamelCase : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_lowerCamelCase : Dict = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCamelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCamelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCamelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCamelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_MAPPING
_lowerCamelCase : Optional[Any] = auto_class_update(FlaxAutoModel)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCamelCase : str = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowerCamelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
if len(__snake_case ) <= 1 or n <= 1:
return
insert_next(__snake_case , n - 1 )
rec_insertion_sort(__snake_case , n - 1 )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
if index >= len(__snake_case ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
A , A = (
collection[index],
collection[index - 1],
)
insert_next(__snake_case , index + 1 )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = input('Enter integers separated by spaces: ')
_lowerCamelCase : List[Any] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 362 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def A (self : Any ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def A (self : List[str] ):
pass
@slow
@require_torch
def A (self : int ):
A = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
A = load_dataset("""ashraq/esc50""" )
A = dataset["""train"""]["""audio"""][-1]["""array"""]
A = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
A = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
A = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def A (self : Tuple ):
pass
| 337 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __UpperCAmelCase ( _snake_case ):
'''simple docstring'''
__lowerCAmelCase = '''poolformer'''
def __init__(self : List[Any] , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : List[str]=16 , _lowerCAmelCase : Optional[Any]=16 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : List[Any]=4.0 , _lowerCAmelCase : Optional[int]=[2, 2, 6, 2] , _lowerCAmelCase : str=[64, 128, 320, 512] , _lowerCAmelCase : List[str]=[7, 3, 3, 3] , _lowerCAmelCase : Dict=[4, 2, 2, 2] , _lowerCAmelCase : Union[str, Any]=[2, 1, 1, 1] , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Tuple="gelu" , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[int]=1e-5 , _lowerCAmelCase : Union[str, Any]=0.02 , **_lowerCAmelCase : Dict , ):
A = num_channels
A = patch_size
A = stride
A = padding
A = pool_size
A = hidden_sizes
A = mlp_ratio
A = depths
A = patch_sizes
A = strides
A = num_encoder_blocks
A = drop_path_rate
A = hidden_act
A = use_layer_scale
A = layer_scale_init_value
A = initializer_range
super().__init__(**UpperCamelCase__ )
class __UpperCAmelCase ( _snake_case ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def A (self : Dict ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A (self : List[str] ):
return 2e-3
| 363 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCamelCase : Dict = 'src/diffusers'
_lowerCamelCase : Dict = '.'
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCamelCase : List[str] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCamelCase : Tuple = spec.loader.load_module()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCAmelCase ) is not None
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = object_name.split(""".""" )
A = 0
# First let's find the module where our object lives.
A = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , f"""{module}.py""" ) ):
i += 1
if i < len(UpperCAmelCase ):
A = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(UpperCAmelCase , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
# Now let's find the class / func in the code!
A = """"""
A = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
return "".join(UpperCAmelCase )
_lowerCamelCase : str = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_lowerCamelCase : Any = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_lowerCamelCase : str = re.compile(R'<FILL\s+[^>]*>')
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = code.split("""\n""" )
A = 0
while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = len(get_indent(UpperCAmelCase ) ) > 0
if has_indent:
A = f"""class Bla:\n{code}"""
A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase )
A = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
A , A = style_docstrings_in_code(UpperCAmelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
with open(UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
A = []
A = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase ):
A = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A = search.groups()
A = find_code_in_diffusers(UpperCAmelCase )
A = get_indent(UpperCAmelCase )
A = line_index + 1 if indent == theoretical_indent else line_index + 2
A = theoretical_indent
A = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A = True
while line_index < len(UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase ):
break
A = lines[line_index]
A = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(f"""^{indent}# End copy""" , UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
A = """""".join(UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
A = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase ) is None]
A = """\n""".join(UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase ) > 0:
A = replace_pattern.replace("""with""" , """""" ).split(""",""" )
A = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A = pattern.groups()
A = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if option.strip() == "all-casing":
A = re.sub(obja.lower() , obja.lower() , UpperCAmelCase )
A = re.sub(obja.upper() , obja.upper() , UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A = blackify(lines[start_index - 1] + theoretical_code )
A = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A = lines[:start_index] + [theoretical_code] + lines[line_index:]
A = start_index + 1
if overwrite and len(UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase )
return diffs
def __a ( UpperCAmelCase = False ) ->int:
"""simple docstring"""
A = glob.glob(os.path.join(UpperCAmelCase , """**/*.py""" ) , recursive=UpperCAmelCase )
A = []
for filename in all_files:
A = is_copy_consistent(UpperCAmelCase , UpperCAmelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(UpperCAmelCase ) > 0:
A = """\n""".join(UpperCAmelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCamelCase : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 337 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Dict = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __UpperCAmelCase ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase = """mobilenet_v1"""
def __init__(self : List[Any] , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Optional[int]=224 , _lowerCAmelCase : Union[str, Any]=1.0 , _lowerCAmelCase : int=8 , _lowerCAmelCase : Dict="relu6" , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[int]=0.999 , _lowerCAmelCase : Union[str, Any]=0.02 , _lowerCAmelCase : Union[str, Any]=0.001 , **_lowerCAmelCase : str , ):
super().__init__(**_A )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
A = num_channels
A = image_size
A = depth_multiplier
A = min_depth
A = hidden_act
A = tf_padding
A = classifier_dropout_prob
A = initializer_range
A = layer_norm_eps
class __UpperCAmelCase ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def A (self : int ):
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def A (self : Optional[int] ):
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def A (self : Union[str, Any] ):
return 1e-4
| 364 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = credit_card_number
A = 0
A = len(UpperCAmelCase ) - 2
for i in range(UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
A = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
A = cc_number[:i] + str(UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
A = f"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(f"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(UpperCAmelCase ) <= 16:
print(f"""{error_message} of its length.""" )
return False
if not validate_initial_digits(UpperCAmelCase ):
print(f"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(UpperCAmelCase ):
print(f"""{error_message} it fails the Luhn check.""" )
return False
print(f"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 337 | 0 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
A = torch.load(snake_case__ , map_location="""cpu""" )
A = chkpt['model']
# We have the base model one level deeper than the original XLM repository
A = {}
for k, v in state_dict.items():
if "pred_layer" in k:
A = v
else:
A = v
A = chkpt['params']
A = {n: v for n, v in config.items() if not isinstance(snake_case__ , (torch.FloatTensor, numpy.ndarray) )}
A = chkpt['dico_word2id']
A = {s + '</w>' if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
# Save pytorch-model
A = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
A = pytorch_dump_folder_path + '/' + CONFIG_NAME
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(snake_case__ , snake_case__ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case__ , indent=2 ) + """\n""" )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case__ , indent=2 ) + """\n""" )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 365 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : List[Any] ):
A = str(id_ )
A = None
A = None
A = []
A = {} # {vertex:distance}
def __lt__(self : List[Any] , _lowerCAmelCase : Tuple ):
return self.key < other.key
def __repr__(self : str ):
return self.id
def A (self : Union[str, Any] , _lowerCAmelCase : List[str] ):
self.neighbors.append(_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
A = weight
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , UpperCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->list:
"""simple docstring"""
A = []
for u in graph:
A = math.inf
A = None
A = 0
A = graph[:]
while q:
A = min(UpperCAmelCase )
q.remove(UpperCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
for i in range(1 , len(UpperCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Iterator[tuple]:
"""simple docstring"""
for u in graph:
A = math.inf
A = None
A = 0
A = list(UpperCAmelCase )
hq.heapify(UpperCAmelCase )
while h:
A = hq.heappop(UpperCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
A = u
A = u.edges[v.id]
hq.heapify(UpperCAmelCase )
for i in range(1 , len(UpperCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __a ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCamelCase : str = logging.get_logger(__name__)
class __UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = ['''pixel_values''']
def __init__(self : str , _lowerCAmelCase : List[str] = True , _lowerCAmelCase : Optional[Any] = None , _lowerCAmelCase : Dict = PILImageResampling.BICUBIC , _lowerCAmelCase : Tuple = True , _lowerCAmelCase : int = True , _lowerCAmelCase : int = 1 / 255 , _lowerCAmelCase : Any = None , _lowerCAmelCase : List[Any] = True , _lowerCAmelCase : Union[str, Any] = None , _lowerCAmelCase : List[str] = None , **_lowerCAmelCase : List[Any] , ):
super().__init__(**_lowerCAmelCase )
A = size if size is not None else {"""height""": 224, """width""": 224}
A = get_size_dict(_lowerCAmelCase )
A = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase , param_name="""crop_size""" )
A = do_resize
A = do_rescale
A = do_normalize
A = do_center_crop
A = crop_size
A = size
A = resample
A = rescale_factor
A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A (self : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple = PILImageResampling.BILINEAR , _lowerCAmelCase : str = None , **_lowerCAmelCase : str , ):
A = get_size_dict(_lowerCAmelCase )
if "shortest_edge" in size:
A = get_resize_output_image_size(_lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=_lowerCAmelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
A = (size["""height"""], size["""width"""])
else:
raise ValueError(F"""Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}""" )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] = None , **_lowerCAmelCase : Tuple , ):
A = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict = None , **_lowerCAmelCase : Optional[int] ):
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] = None , **_lowerCAmelCase : Optional[Any] , ):
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Any = None , _lowerCAmelCase : int = None , _lowerCAmelCase : List[str] = None , _lowerCAmelCase : int = None , _lowerCAmelCase : Tuple = None , _lowerCAmelCase : Tuple = None , _lowerCAmelCase : List[Any] = None , _lowerCAmelCase : Tuple = None , _lowerCAmelCase : Optional[Any] = None , _lowerCAmelCase : Dict = None , _lowerCAmelCase : str = ChannelDimension.FIRST , **_lowerCAmelCase : List[Any] , ):
A = do_resize if do_resize is not None else self.do_resize
A = do_rescale if do_rescale is not None else self.do_rescale
A = do_normalize if do_normalize is not None else self.do_normalize
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" , default_to_square=_lowerCAmelCase )
A = resample if resample is not None else self.resample
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = size if size is not None else self.size
A = get_size_dict(_lowerCAmelCase )
if not is_batched(_lowerCAmelCase ):
A = [images]
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
A = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
A = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
A = {"""pixel_values""": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
| 366 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''perceiver'''
def __init__(self : Dict , _lowerCAmelCase : List[str]=256 , _lowerCAmelCase : Any=1280 , _lowerCAmelCase : Dict=768 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Optional[int]=26 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Any=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]="kv" , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Any=1e-12 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : int=262 , _lowerCAmelCase : int=2048 , _lowerCAmelCase : int=56 , _lowerCAmelCase : List[Any]=[368, 496] , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : Any=1920 , _lowerCAmelCase : Optional[int]=16 , _lowerCAmelCase : List[Any]=[1, 16, 224, 224] , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
A = num_latents
A = d_latents
A = d_model
A = num_blocks
A = num_self_attends_per_block
A = num_self_attention_heads
A = num_cross_attention_heads
A = qk_channels
A = v_channels
A = cross_attention_shape_for_attention
A = self_attention_widening_factor
A = cross_attention_widening_factor
A = hidden_act
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = use_query_residual
# masked language modeling attributes
A = vocab_size
A = max_position_embeddings
# image classification attributes
A = image_size
# flow attributes
A = train_size
# multimodal autoencoding attributes
A = num_frames
A = audio_samples_per_frame
A = samples_per_patch
A = output_shape
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
def A (self : List[str] ):
if self.task == "multiple-choice":
A = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def A (self : Dict ):
return 1e-4
def A (self : List[Any] , _lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = preprocessor.num_special_tokens_to_add(_lowerCAmelCase )
A = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
A = [""" """.join(["""a"""] ) * seq_length] * batch_size
A = dict(preprocessor(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""input_ids""" )
return inputs
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
A = self._generate_dummy_images(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A = dict(preprocessor(images=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
A = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 337 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __a ( ) ->List[str]:
"""simple docstring"""
A = torch.nn.Linear(2 , 4 )
A = torch.optim.AdamW(model.parameters() , lr=1.0 )
A = torch.optim.lr_scheduler.OneCycleLR(UpperCAmelCase , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
A = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
A = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __a ( UpperCAmelCase ) ->Any:
"""simple docstring"""
A = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(UpperCAmelCase )
class __UpperCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@require_cuda
def A (self : Dict ):
A = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__lowercase ):
A = Accelerator(cpu=__lowercase )
def A (self : Tuple ):
A = Accelerator()
A = GradientState()
assert state.num_steps == 1
A = 4
assert state.num_steps == 4
assert state.sync_gradients is True
A = False
assert state.sync_gradients is False
GradientState._reset_state()
def A (self : str ):
A = Accelerator()
A = create_components()
(
A
) = accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def A (self : int ):
A = Accelerator()
A = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def A (self : Any ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_lowerCAmelCase : List[Any] , **_lowerCAmelCase : int ):
pass
with patch("""torch.cuda.set_device""" , __lowercase ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
A = Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def A (self : Union[str, Any] ):
A = Accelerator()
A = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
A = get_signature(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase )
# make sure random weights don't match
load_random_weights(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) < 1e-3 )
def A (self : List[Any] ):
A = Accelerator()
A = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
A = get_signature(__lowercase )
# saving hook
def save_config(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
A = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(__lowercase , """data.json""" ) , """w""" ) as f:
json.dump(__lowercase , __lowercase )
# loading hook
def load_config(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
with open(os.path.join(__lowercase , """data.json""" ) , """r""" ) as f:
A = json.load(__lowercase )
A = config['''class_name''']
A = accelerator.register_save_state_pre_hook(__lowercase )
A = accelerator.register_load_state_pre_hook(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase )
# make sure random weights don't match with hooks
load_random_weights(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) > 1e-3 )
# random class name to verify correct one is loaded
A = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase )
# make sure random weights don't match with hooks removed
load_random_weights(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) > 1e-3 )
# random class name to verify correct one is loaded
A = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def A (self : Dict ):
A = Accelerator()
A = create_components()
A = None
# This should work
A = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
self.assertTrue(dummy_obj is None )
def A (self : Any ):
A = Accelerator()
A = create_components()
A = [1, 2, 3]
# This should work
A = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
self.assertEqual(
getattr(__lowercase , """_is_accelerate_prepared""" , __lowercase ) , __lowercase , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(__lowercase , """_is_accelerate_prepared""" , __lowercase ) , __lowercase , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__lowercase , """_is_accelerate_prepared""" , __lowercase ) , __lowercase , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__lowercase , """_is_accelerate_prepared""" , __lowercase ) , __lowercase , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__lowercase , """_is_accelerate_prepared""" , __lowercase ) , __lowercase , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__lowercase , """_is_accelerate_prepared""" , __lowercase ) , __lowercase , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def A (self : Optional[Any] ):
from transformers import AutoModelForCausalLM
A = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__lowercase , device_map={"""""": 0} , )
A = Accelerator()
# This should work
A = accelerator.prepare(__lowercase )
@slow
@require_bnb
def A (self : int ):
from transformers import AutoModelForCausalLM
A = Accelerator()
with init_empty_weights():
A = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
A = infer_auto_device_map(__lowercase )
A = '''cpu'''
A = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=__lowercase , load_in_abit=__lowercase , llm_inta_enable_fpaa_cpu_offload=__lowercase )
# This should not work and get value error
with self.assertRaises(__lowercase ):
A = accelerator.prepare(__lowercase )
@slow
@require_bnb
@require_multi_gpu
def A (self : Optional[Any] ):
from transformers import AutoModelForCausalLM
A = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
A = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
A = infer_auto_device_map(__lowercase )
A = 1
A = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__lowercase , device_map=__lowercase , )
A = Accelerator()
# This should not work and get value error
with self.assertRaises(__lowercase ):
A = accelerator.prepare(__lowercase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def A (self : Union[str, Any] ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
A = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
A = infer_auto_device_map(__lowercase )
A = 1
A = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__lowercase , device_map=__lowercase , )
A = Accelerator()
# This should work
A = accelerator.prepare(__lowercase )
@require_cuda
def A (self : List[str] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , lr=0.01 )
A = Accelerator(cpu=__lowercase )
A = accelerator.prepare(__lowercase )
| 367 |
'''simple docstring'''
import math
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
A = n
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # adjacency matrix for weight
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
A = w
def A (self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def A (self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__(self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
A = dataset
A = process
A = params
def __len__(self : List[str] ):
return len(self.dataset )
def __getitem__(self : Union[str, Any] , _lowerCAmelCase : List[str] ):
A = self.dataset[i]
A = self.process(__a , **self.params )
return processed
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int]=None ):
A = loader
A = infer
A = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
A = None
A = loader_batch_size
# Internal bookkeeping
A = None
A = None
def __len__(self : Any ):
return len(self.loader )
def __iter__(self : Dict ):
A = iter(self.loader )
return self
def A (self : Tuple ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
A = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
A = {}
for k, element in self._loader_batch_data.items():
if isinstance(__a , __a ):
# Convert ModelOutput to tuple first
A = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
A = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
A = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__a , __a ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
A = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
A = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
A = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
A = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
A = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
A = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
A = self._loader_batch_data.__class__(__a )
self._loader_batch_index += 1
return result
def A (self : Optional[Any] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
A = next(self.iterator )
A = self.infer(__a , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(__a , torch.Tensor ):
A = processed
else:
A = list(processed.keys() )[0]
A = processed[key]
if isinstance(__a , __a ):
A = len(__a )
else:
A = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
A = observed_batch_size
# Setting internal index to unwrap the batch
A = processed
A = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__(self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None ):
super().__init__(__a , __a , __a )
def __iter__(self : Tuple ):
A = iter(self.loader )
A = None
return self
def A (self : str ):
if self.subiterator is None:
A = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
A = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
A = self.infer(next(self.iterator ) , **self.params )
A = next(self.subiterator )
return processed
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __iter__(self : str ):
A = iter(self.loader )
return self
def A (self : Optional[int] ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# its a `is_last` and then just passes it on to the caller.
A = False
A = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
A = self.loader_batch_item()
A = item.pop("""is_last""" )
accumulator.append(__a )
if is_last:
return accumulator
while not is_last:
A = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(__a , torch.Tensor ):
A = processed
else:
A = list(processed.keys() )[0]
A = processed[key]
if isinstance(__a , __a ):
A = len(__a )
else:
A = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
A = observed_batch_size
A = processed
A = 0
while self._loader_batch_index < self.loader_batch_size:
A = self.loader_batch_item()
A = item.pop("""is_last""" )
accumulator.append(__a )
if is_last:
return accumulator
else:
A = processed
A = item.pop("""is_last""" )
accumulator.append(__a )
return accumulator
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__(self : str , _lowerCAmelCase : Dataset , _lowerCAmelCase : str ):
A = dataset
A = key
def __len__(self : List[str] ):
return len(self.dataset )
def __getitem__(self : Union[str, Any] , _lowerCAmelCase : int ):
return self.dataset[i][self.key]
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : Dataset , _lowerCAmelCase : str , _lowerCAmelCase : str ):
A = dataset
A = keya
A = keya
def __len__(self : Union[str, Any] ):
return len(self.dataset )
def __getitem__(self : Optional[int] , _lowerCAmelCase : Optional[int] ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 368 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : List[str] = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase : List[str] = {
'Salesforce/codegen-350M-mono': 2048,
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = CodeGenTokenizer
def __init__(self : int , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[Any]="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Any=False , **_lowerCAmelCase : Optional[int] , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
if kwargs.pop("""add_bos_token""" , _lowerCAmelCase ):
A = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
A = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
A = add_prefix_space
A = pre_tok_class(**_lowerCAmelCase )
A = add_prefix_space
def A (self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
A = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def A (self : Tuple , _lowerCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[List[str]] = None , **_lowerCAmelCase : Tuple , ):
A = super().decode(
token_ids=_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase , **_lowerCAmelCase , )
if truncate_before_pattern is not None and len(_lowerCAmelCase ) > 0:
A = self.truncate(_lowerCAmelCase , _lowerCAmelCase )
return decoded_text
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
def find_re(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
A = pattern.search(_lowerCAmelCase , _lowerCAmelCase )
return m.start() if m else -1
A = [re.compile(_lowerCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
A = list(re.finditer("""^print""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: prints[1].start()]
A = list(re.finditer("""^def""" , _lowerCAmelCase , re.MULTILINE ) )
if len(_lowerCAmelCase ) > 1:
A = completion[: defs[1].start()]
A = 0
A = [
pos for pos in [find_re(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for terminal in terminals] if pos != -1
]
if len(_lowerCAmelCase ) > 0:
return completion[: min(_lowerCAmelCase )]
else:
return completion
| 337 | 0 |
import math
import unittest
def __a ( UpperCAmelCase ) ->bool:
"""simple docstring"""
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : str ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def A (self : List[Any] ):
with self.assertRaises(_UpperCAmelCase ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn\'t have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Optional[Any] = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 337 | 0 |
'''simple docstring'''
def __a ( UpperCAmelCase ) ->float:
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
A = sum(__SCREAMING_SNAKE_CASE ) / len(__SCREAMING_SNAKE_CASE ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 337 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.