"""Convert the original DialoGPT checkpoints to the Hugging Face format."""

import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    # The original checkpoints store the LM head under a different key name.
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
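# Usage sketch (folder and script names below are just examples): drop the
# original `{small,medium,large}_ft.pkl` DialoGPT checkpoints into one folder
# and point the script at it:
#
#   python convert_dialogpt_checkpoint.py --dialogpt_path ./dialogpt_checkpoints
#
# Each converted state dict is written to ./DialoGPT-<size>/<WEIGHTS_NAME>.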
"""Tests for the Flax RoFormer models."""

import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
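# To run these tests from a transformers checkout (the path below is the file's
# usual location in the repo; RUN_SLOW=1 also enables the @slow integration tests):
#
#   RUN_SLOW=1 python -m pytest tests/models/roformer/test_modeling_flax_roformer.py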
"""PyTorch - Flax general utilities."""

from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax checkpoint weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv layer: Flax stores kernels as HWIO, PyTorch expects OIHW
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # linear layer: transpose the kernel
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
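# Usage sketch (file name is hypothetical): given an already-instantiated
# PyTorch module of the matching architecture and a Flax checkpoint serialized
# with flax.serialization.to_bytes, the loader transposes kernels as above and
# returns the populated PyTorch model:
#
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")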
"""Convert BLIP-2 checkpoints from the original LAVIS repository to the Hugging Face format."""

import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict (k gets a zero bias)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights to the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Name of the model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
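# Example invocation (paths are placeholders; requires the modified LAVIS
# install mentioned at the top of this script, plus a GPU for the larger models):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b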
"""Tests for DatasetInfo and DatasetInfosDict serialization in `datasets`."""

import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
"""RePaint: image inpainting with denoising diffusion probabilistic models."""

import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
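# Usage sketch, following the diffusers RePaint example (the checkpoint id is
# the CelebA-HQ DDPM UNet commonly paired with this pipeline; any compatible
# unconditional UNet2DModel would do):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   result = pipe(image=original_image, mask_image=mask_image).images[0]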
"""Prepare the Natural Questions dataset for BigBird question answering."""

import os

import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    """Build the plain-text context (HTML tokens removed) and shift the answer span accordingly."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question + context and split long contexts into overlapping windows."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the no-answer ("null") windows
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
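# Run sketch (the script file name is an assumption; use whatever this module
# is saved as). PROCESS_TRAIN selects the split, and the output shards are the
# nq-training.jsonl / nq-validation.jsonl files written above:
#
#   PROCESS_TRAIN=true python prepare_natural_questions.py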
"""Zero-shot image classification pipeline (CLIP-style image/text matching)."""

from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
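# Usage sketch via the high-level factory (any CLIP-style checkpoint works; the
# model id below is one common choice, and "cat.png" is a placeholder path):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "car"])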
"""Term frequency (TF), inverse document frequency (IDF) and TF-IDF helpers."""

import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how often `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents).

    Each newline-separated block of `corpus` counts as one document.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), optionally with add-one smoothing of df."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine a term frequency and an inverse document frequency."""
    return round(tf * idf, 3)
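# A quick worked example tying the helpers together (the corpus below is made
# up; each line is treated as one document by document_frequency):
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog barked\nthe cat and the dog"
    tf = term_frequency("cat", "the cat sat")  # -> 1
    df, n = document_frequency("cat", corpus)  # -> (2, 3)
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # 1 * round(log10(3/2), 3) -> 0.176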
"""Lazy import structure for the GPT-BigCode model."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
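# Note: with the _LazyModule indirection above, importing the package does not
# pull in the torch-backed modeling code; the modeling_gpt_bigcode submodule is
# only loaded on first attribute access (e.g. the first use of GPTBigCodeModel).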
"""
Project Euler problem 37: https://projecteuler.net/problem=37

Find the sum of the only eleven primes that are truncatable from both
left to right and right to left.
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Check whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return all left and right truncations of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick filter: for numbers longer than 3 digits, the leading and trailing
    3-digit chunks must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` two-sided truncatable primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Return the sum of the eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
"""Compute the digit sum of an integer, three ways, with a small benchmark."""


def sum_of_digits(n: int) -> int:
    """
    Find the sum of digits of a number.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    >>> sum_of_digits(0)
    0
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of digits of a number using recursion.

    >>> sum_of_digits_recursion(12345)
    15
    >>> sum_of_digits_recursion(-123)
    6
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number via its string form.

    >>> sum_of_digits_compact(12345)
    15
    >>> sum_of_digits_compact(-123)
    6
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on 2**18, 2**50 and 2**100."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
"""Preprocess a text dump once (tokenization + token-to-id) before distillation training."""

import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end - start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 is enough as long as every token id fits in 16 bits
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
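# Example invocation (file paths are placeholders; the dump file name gains a
# `.<tokenizer_name>.pickle` suffix, as constructed above):
#
#   python binarized_data.py --file_path data/dump.txt \
#       --tokenizer_type bert --tokenizer_name bert-base-uncased --dump_file data/binarized_text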
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
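# Illustrative check (not from the source; assumes the helpers behave as their
# de-obfuscated originals, ignoring the table they print as a side effect):
#
#   infix_2_postfix("a+b*c")  # returns "abc*+"
#   infix_2_prefix("a+b*c")   # returns "+a*bc"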
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 24 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase ( __lowerCAmelCase):
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = SMALL_MODEL_IDENTIFIER
__snake_case = '''pt'''
__snake_case = '''tf'''
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = TFAutoModel.from_pretrained(self.test_model , from_pt=__SCREAMING_SNAKE_CASE )
model_tf.save_pretrained(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''mock_framework'''
# Framework provided - return whatever the user provides
__snake_case = FeaturesManager.determine_framework(self.test_model , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE )
__snake_case = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE )
__snake_case = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE )
__snake_case = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE )
__snake_case = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__snake_case = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ):
__snake_case = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__snake_case = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ):
__snake_case = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf )
# Both in environment -> use PyTorch
__snake_case = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
__snake_case = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ), patch(
'''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ):
__snake_case = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt )
# Both not in environment -> raise error
__snake_case = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
__snake_case = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ), patch(
'''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ):
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__snake_case = FeaturesManager.determine_framework(self.test_model )
| 24 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
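# Minimal usage sketch (assumption: upstream these classes are SwinConfig and
# SwinOnnxConfig; the names above are obfuscated):
#
#   config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   config.hidden_size  # int(96 * 2 ** (4 - 1)) == 768, the channel dim after the last stage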
| 24 | 1 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _UpperCamelCase (_lowerCamelCase : dict[int, list[int]] )-> list[tuple[int, int]]:
'''simple docstring'''
__snake_case = 0
__snake_case = len(_lowerCamelCase ) # No of vertices in graph
__snake_case = [0] * n
__snake_case = [False] * n
def dfs(_lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ):
__snake_case = True
__snake_case = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , id_ )
__snake_case = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
__snake_case = min(low[at] , low[to] )
__snake_case = []
for i in range(_lowerCamelCase ):
if not visited[i]:
dfs(_lowerCamelCase , -1 , _lowerCamelCase , id_ )
return bridges
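# Illustrative check (not from the source): on the first demo graph above, the
# triangle 0-1-2 and the cycle 5-6-7-8 contain no bridges, so the function
# returns (3, 4), (2, 3) and (2, 5), in DFS discovery order.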
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
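# Illustrative check (not from the source): format_time(3661) -> '1:01:01' and
# format_time(75) -> '01:15' (the hour field is dropped when it is zero).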
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 1 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ : Union[str, Any] = '''ResNetConfig'''
# Base docstring
UpperCAmelCase_ : Tuple = '''microsoft/resnet-50'''
UpperCAmelCase_ : List[Any] = [1, 2_0_4_8, 7, 7]
# Image classification docstring
UpperCAmelCase_ : Optional[Any] = '''microsoft/resnet-50'''
UpperCAmelCase_ : Dict = '''tiger cat'''
UpperCAmelCase_ : Optional[Any] = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = "relu" ) -> List[Any]:
'''simple docstring'''
super().__init__()
__snake_case = nn.Convad(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 , bias=__SCREAMING_SNAKE_CASE )
__snake_case = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
__snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Tensor:
'''simple docstring'''
__snake_case = self.convolution(__SCREAMING_SNAKE_CASE )
__snake_case = self.normalization(__SCREAMING_SNAKE_CASE )
__snake_case = self.activation(__SCREAMING_SNAKE_CASE )
return hidden_state
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
super().__init__()
__snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
__snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
__snake_case = config.num_channels
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Tensor:
'''simple docstring'''
__snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                '''Make sure that the channel dimension of the pixel values matches the one set in the configuration.''' )
__snake_case = self.embedder(__SCREAMING_SNAKE_CASE )
__snake_case = self.pooler(__SCREAMING_SNAKE_CASE )
return embedding
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
__snake_case = nn.Convad(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 , stride=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE )
__snake_case = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Tensor:
'''simple docstring'''
__snake_case = self.convolution(__SCREAMING_SNAKE_CASE )
__snake_case = self.normalization(__SCREAMING_SNAKE_CASE )
return hidden_state
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = "relu" ) -> List[Any]:
'''simple docstring'''
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = (
ResNetShortCut(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
ResNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE ) , ResNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , activation=__SCREAMING_SNAKE_CASE ) , )
__snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = hidden_state
__snake_case = self.layer(__SCREAMING_SNAKE_CASE )
__snake_case = self.shortcut(__SCREAMING_SNAKE_CASE )
hidden_state += residual
__snake_case = self.activation(__SCREAMING_SNAKE_CASE )
return hidden_state
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = "relu" , __SCREAMING_SNAKE_CASE = 4 ) -> List[Any]:
'''simple docstring'''
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = out_channels // reduction
__snake_case = (
ResNetShortCut(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
ResNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ) , ResNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE ) , ResNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 , activation=__SCREAMING_SNAKE_CASE ) , )
__snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
__snake_case = hidden_state
__snake_case = self.layer(__SCREAMING_SNAKE_CASE )
__snake_case = self.shortcut(__SCREAMING_SNAKE_CASE )
hidden_state += residual
__snake_case = self.activation(__SCREAMING_SNAKE_CASE )
return hidden_state
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 2 , ) -> str:
'''simple docstring'''
super().__init__()
__snake_case = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
__snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , activation=config.hidden_act ) , *[layer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Tensor:
'''simple docstring'''
__snake_case = input
for layer in self.layers:
__snake_case = layer(__SCREAMING_SNAKE_CASE )
return hidden_state
class lowerCAmelCase ( nn.Module):
def __init__( self , __SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
super().__init__()
__snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__SCREAMING_SNAKE_CASE , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__SCREAMING_SNAKE_CASE , config.depths[1:] ):
self.stages.append(ResNetStage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , depth=__SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
__snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
__snake_case = stage_module(__SCREAMING_SNAKE_CASE )
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__SCREAMING_SNAKE_CASE , hidden_states=__SCREAMING_SNAKE_CASE , )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[Any] = ResNetConfig
__lowercase : Optional[int] = '''resnet'''
__lowercase : Dict = '''pixel_values'''
__lowercase : List[Any] = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(__SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Any:
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = value
UpperCAmelCase_ : Any = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCAmelCase_ : str = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , __lowerCAmelCase , )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = config
__snake_case = ResNetEmbeddings(__SCREAMING_SNAKE_CASE )
__snake_case = ResNetEncoder(__SCREAMING_SNAKE_CASE )
__snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.embedder(__SCREAMING_SNAKE_CASE )
__snake_case = self.encoder(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
__snake_case = encoder_outputs[0]
__snake_case = self.pooler(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__SCREAMING_SNAKE_CASE , pooler_output=__SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __lowerCAmelCase , )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = config.num_labels
__snake_case = ResNetModel(__SCREAMING_SNAKE_CASE )
# classification head
__snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.resnet(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
__snake_case = outputs.pooler_output if return_dict else outputs[1]
__snake_case = self.classifier(__SCREAMING_SNAKE_CASE )
__snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case = '''single_label_classification'''
else:
__snake_case = '''multi_label_classification'''
if self.config.problem_type == "regression":
__snake_case = MSELoss()
if self.num_labels == 1:
__snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__snake_case = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config.problem_type == "single_label_classification":
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__snake_case = BCEWithLogitsLoss()
__snake_case = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not return_dict:
__snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , __lowerCAmelCase , )
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
super()._init_backbone(__SCREAMING_SNAKE_CASE )
__snake_case = [config.embedding_size] + config.hidden_sizes
__snake_case = ResNetEmbeddings(__SCREAMING_SNAKE_CASE )
__snake_case = ResNetEncoder(__SCREAMING_SNAKE_CASE )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
@replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None ) -> BackboneOutput:
'''simple docstring'''
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = self.embedder(__SCREAMING_SNAKE_CASE )
__snake_case = self.encoder(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
__snake_case = outputs.hidden_states
__snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__SCREAMING_SNAKE_CASE , )
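# Minimal usage sketch (assumption: upstream these classes are ResNetModel,
# ResNetForImageClassification and ResNetBackbone; names above are obfuscated):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   logits = model(**processor(images=image, return_tensors="pt")).logits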
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
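# Illustrative check (not from the source; assumes the de-obfuscated name
# least_divisible_repunit): for divisor 7 the repunits 1, 11, ..., 11_111 all
# leave nonzero remainders and 111_111 == 7 * 15_873, so the result is 6.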
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : ScoreSdeVeScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 2000 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , **__SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = self.unet.config.sample_size
__snake_case = (batch_size, 3, img_size, img_size)
__snake_case = self.unet
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ) * self.scheduler.init_noise_sigma
__snake_case = sample.to(self.device )
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
self.scheduler.set_sigmas(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__snake_case = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
__snake_case = self.scheduler.step_correct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# prediction step
__snake_case = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
__snake_case = self.scheduler.step_pred(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = output.prev_sample, output.prev_sample_mean
__snake_case = sample_mean.clamp(0 , 1 )
__snake_case = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
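# Minimal usage sketch (assumption: upstream this pipeline is ScoreSdeVePipeline;
# parameter names above are obfuscated, so defaults are used):
#
#   pipe = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
#   image = pipe().images[0]  # 2_000 sampling steps by default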
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
    # The id ints will be converted to PyArrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 1 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : float , _lowerCamelCase : float )-> float:
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float )-> float:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float )-> float:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float )-> float:
'''simple docstring'''
return round(float((pressure * volume) / (0.0821 * moles) ) )
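# Illustrative check (not from the source; all four helpers share the obfuscated
# name _UpperCamelCase, so this assumes their de-obfuscated signatures): solving
# PV = nRT for P with R = 0.0821, n = 2 mol, T = 300 K and V = 0.5 L gives
# round(2 * 0.0821 * 300 / 0.5) == 99.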
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> float:
'''simple docstring'''
__snake_case = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
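# Illustrative check (not from the source): sum_of_series(1, 1, 10) evaluates
# (10 / 2) * (2 * 1 + 9 * 1) == 55.0, the sum of the first ten naturals.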
def _UpperCamelCase ()-> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
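# Illustrative trace (not from the source; assumes the de-obfuscated name
# rename_key): rename_key("patch_embed.0.weight", ...) takes the patch_embed
# branch and returns "efficientformer.patch_embed.convolution1.weight".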
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__snake_case = config.depths[-1] - config.num_metaad_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=False )-> Union[str, Any]:
'''simple docstring'''
try:
__snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case = strtobool(_lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCAmelCase_ : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('''test requires faiss''' )(_lowerCamelCase )
return test_case
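# Illustrative usage (not from the source; assumes the de-obfuscated name
# require_faiss): each require_* helper is a decorator that skips the wrapped
# test when its optional dependency is missing, e.g.
#
#   @require_faiss
#   def test_faiss_index(self):
#       ...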
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[str]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('''test requires regex''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase )
return test_case
def require_torch(test_case):
    '''Decorator marking a test that requires PyTorch.'''
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('''test requires PyTorch''')(test_case)
    return test_case


def require_tf(test_case):
    '''Decorator marking a test that requires TensorFlow.'''
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('''test requires TensorFlow''')(test_case)
    return test_case


def require_jax(test_case):
    '''Decorator marking a test that requires JAX.'''
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('''test requires JAX''')(test_case)
    return test_case


def require_pil(test_case):
    '''Decorator marking a test that requires Pillow.'''
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('''test requires Pillow''')(test_case)
    return test_case
def require_transformers(test_case):
    '''Decorator marking a test that requires transformers.'''
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('''test requires transformers''')(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    '''Decorator marking a test that requires tiktoken.'''
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('''test requires tiktoken''')(test_case)
    else:
        return test_case


def require_spacy(test_case):
    '''Decorator marking a test that requires spacy.'''
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('''test requires spacy''')(test_case)
    else:
        return test_case


def require_spacy_model(model):
    '''Decorator marking a test that requires a specific spacy model.'''

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('''test requires spacy''')(test_case)
        except OSError:
            return unittest.skip('''test requires spacy model \'{}\''''.format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    '''Decorator marking a test that requires pyspark.'''
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires pyspark''')(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    '''Decorator marking a test that requires joblibspark.'''
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires joblibspark''')(test_case)
    else:
        return test_case
def slow(test_case):
    '''Decorator marking a test as slow (skipped unless RUN_SLOW is set).'''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('''test is slow''')(test_case)
    return test_case


def local(test_case):
    '''Decorator marking a test as local (skipped unless RUN_LOCAL is set).'''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('''test is local''')(test_case)
    return test_case


def packaged(test_case):
    '''Decorator marking a test as packaged (skipped unless RUN_PACKAGED is set).'''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('''test is packaged''')(test_case)
    return test_case


def remote(test_case):
    '''Decorator marking a test that requires remote access (skipped unless RUN_REMOTE is set).'''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('''test requires remote''')(test_case)
    return test_case
def for_all_test_methods(*decorators):
    '''Apply every given decorator to each test method of the decorated class.'''

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('''test'''):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
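# Illustrative usage sketch (class and test names assumed, not from the original module):
#
#     @for_all_test_methods(require_torch, slow)
#     class HeavyConversionTests(unittest.TestCase):
#         def test_big_model(self):
#             ...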
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    '''Simulate an offline environment for the duration of the context.'''
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = '''https://10.255.255.1'''
        if kwargs.get('''timeout''') is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
        kwargs['''timeout'''] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('''10.255.255.1''', f'''OfflineMock[{url}]'''),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('''Offline mode is enabled.''', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''', True):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''')
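# Illustrative usage sketch (the load_dataset call is assumed, not from this module):
#
#     with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#         load_dataset("squad")  # any HTTP request now fails fast with a mocked timeout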
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    '''Run the block inside a temporary working directory, restoring the original cwd afterwards.'''
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    '''Check that two numpy bit generators produce the same stream, without consuming them.'''
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    '''Mark a test as xfail when the remote side answers with a transient 500/502 error.'''
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('''500''') or str(err).startswith('''502'''):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='''stdout:''')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='''stderr:''')),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')

    return result
def pytest_xdist_worker_id():
    '''Return the numeric id of the current pytest-xdist worker (0 when not distributed).'''
    worker = os.environ.get('''PYTEST_XDIST_WORKER''', '''gw0''')
    worker = re.sub(R'''^gw''', '''''', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    '''Derive a per-worker port so torch.distributed tests launched by pytest-xdist don't collide.'''
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 24 | 1 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    '''Compute the gamma function by numerically integrating x**(num - 1) * exp(-x) over [0, inf).'''
    if num <= 0:
        raise ValueError('''math domain error''')
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    '''Integrand of the gamma function.'''
    return math.pow(x, z - 1) * math.exp(-x)
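# Illustrative check (not in the original file): gamma(n) equals (n - 1)! for positive
# integers, so gamma(5) should integrate to 4! = 24.
#
#     assert round(gamma(5)) == 24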
if __name__ == "__main__":
from doctest import testmod
testmod()
| 24 |
'''simple docstring'''
def partition(m: int) -> int:
    '''Count the number of ways to partition m into positive integers (dynamic programming).'''
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
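# Illustrative check (not in the original file): the 7 partitions of 5 are
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
#
#     assert partition(5) == 7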
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ : List[str] = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase_ : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float'''),
                    '''references''': datasets.Value('''float'''),
                }),
            reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 24 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    '''Sort one auto-mapping file in place (or report whether it needs sorting).'''
    with open(fname, '''r''', encoding='''utf-8''') as f:
        content = f.read()

    lines = content.split('''\n''')
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(R'''^(\s*)\S''', lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''('''):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')'''):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, '''w''', encoding='''utf-8''') as f:
            f.write('''\n'''.join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    '''Sort every auto-mapping file under PATH_TO_AUTO_MODULE.'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith('''.py''')]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f'''The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix'''
            ''' this.''')
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 24 | 1 |
'''simple docstring'''
class CircularQueue:
    '''A fixed-capacity FIFO queue backed by a circular array.'''

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''')

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('''UNDERFLOW''')

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
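# Illustrative usage sketch (not part of the original file):
#
#     q = CircularQueue(3)
#     q.enqueue("a").enqueue("b")
#     assert len(q) == 2 and q.first() == "a"
#     assert q.dequeue() == "a" and len(q) == 1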
| 24 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    '''Warn about each (attribute, version, message) deprecation tuple and pop deprecated kwargs.'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''')

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''

        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''')

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
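# Illustrative call pattern (the caller's names and the version are assumed, not from this file):
#
#     def step(self, generator=None, **kwargs):
#         seed = deprecate("seed", "0.20.0", "Use `generator` instead.", take_from=kwargs)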
| 24 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 24 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    '''Map a key from the original EfficientFormer checkpoint to the HF naming scheme.'''
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split('''.''')

        if layer == "0":
            new_name = old_name.replace('''0''', '''convolution1''')
        elif layer == "1":
            new_name = old_name.replace('''1''', '''batchnorm_before''')
        elif layer == "3":
            new_name = old_name.replace('''3''', '''convolution2''')
        else:
            new_name = old_name.replace('''4''', '''batchnorm_after''')

    if "network" in old_name and re.search(R'''\d\.\d''', old_name):
        two_digit_num = R'''\b\d{2}\b'''
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(R'''\d\.\d\d.''', old_name).group()
        else:
            match = re.search(R'''\d\.\d.''', old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, '''''')
            trimmed_name = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1])
            new_name = '''intermediate_stages.''' + trimmed_name
        else:
            trimmed_name = old_name.replace(match, '''''')
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace('''norm1''', '''layernorm1''')
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace('''norm2''', '''layernorm2''')
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace('''fc1''', '''linear_in''')
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace('''fc2''', '''linear_out''')

            new_name = '''last_stage.''' + trimmed_name

    elif "network" in old_name and re.search(R'''.\d.''', old_name):
        new_name = old_name.replace('''network''', '''intermediate_stages''')

    if "fc" in new_name:
        new_name = new_name.replace('''fc''', '''convolution''')
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('''norm1''', '''batchnorm_before''')
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('''norm2''', '''batchnorm_after''')
    if "proj" in new_name:
        new_name = new_name.replace('''proj''', '''projection''')
    if "dist_head" in new_name:
        new_name = new_name.replace('''dist_head''', '''distillation_classifier''')
    elif "head" in new_name:
        new_name = new_name.replace('''head''', '''classifier''')
    elif "patch_embed" in new_name:
        new_name = '''efficientformer.''' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('''norm''', '''layernorm''')
        new_name = '''efficientformer.''' + new_name
    else:
        new_name = '''efficientformer.encoder.''' + new_name

    return new_name
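# Illustrative mappings, traced through the logic above (not part of the original script):
#
#     rename_key("patch_embed.0.weight", 5)  # -> "efficientformer.patch_embed.convolution1.weight"
#     rename_key("head.weight", 5)           # -> "classifier.weight"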
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    '''Rename every key of the original state dict in place.'''
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    '''Download the standard COCO test image used to verify the conversion.'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size},
        crop_size={'''height''': crop_size, '''width''': crop_size},
        resample=pillow_resamplings['''bicubic'''],
    )
    pixel_values = processor(images=image, return_tensors='''pt''').pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['''bicubic''']),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''')

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''')
    processor.save_pretrained(pytorch_dump_path)
    print(f'''Processor successfully saved at {pytorch_dump_path}''')

    if push_to_hub:
        print('''Pushing model to the hub...''')

        model.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''',
            commit_message='''Add model''',
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''',
            commit_message='''Add image processor''',
            use_temp_dir=True,
        )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    '''Solve y' = f(x, y) with Heun's method (predictor-corrector / modified Euler).'''
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
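# Quick check (illustrative, not in the original file): for y' = y with y(0) = 1,
# the solution at x = 1 should approach e ≈ 2.71828 as step_size shrinks.
#
#     y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
#     assert abs(y[-1] - 2.71828) < 1e-2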
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''center_crop'''))
        self.assertTrue(hasattr(image_processing, '''do_flip_channel_order'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 20})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
| 24 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = '''cvt'''

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
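# Illustrative usage (not part of the original file): every list argument holds one value
# per CvT stage, so the defaults describe three stages with embedding widths 64, 192 and 384.
#
#     config = CvtConfig(depth=[1, 2, 10])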
| 24 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    '''A dataset reader that builds a Dataset (or streaming dataset) from a Spark DataFrame.'''

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = '''arrow''',
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
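# Illustrative usage sketch (the SparkSession and schema are assumed, not from this module):
#
#     spark_df = spark.createDataFrame([("a",), ("b",)], ["text"])
#     ds = SparkDatasetReader(spark_df, streaming=False).read()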
| 24 | 1 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    '''Set (to 1) the bit at `position` in `number`.'''
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    '''Clear (to 0) the bit at `position` in `number`.'''
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    '''Flip the bit at `position` in `number`.'''
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    '''Return True if the bit at `position` is set.'''
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    '''Return the bit at `position` as 0 or 1.'''
    return int((number & (1 << position)) != 0)
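# Illustrative checks (not part of the original file):
#
#     assert set_bit(0b1101, 1) == 0b1111
#     assert clear_bit(0b1111, 1) == 0b1101
#     assert flip_bit(0b1101, 1) == 0b1111
#     assert is_bit_set(0b1010, 3) is True
#     assert get_bit(0b1010, 0) == 0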
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/led-base-16384''': 16_384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors='''replace''',
        bos_token='''<s>''',
        eos_token='''</s>''',
        sep_token='''</s>''',
        cls_token='''<s>''',
        unk_token='''<unk>''',
        pad_token='''<pad>''',
        mask_token='''<mask>''',
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''])
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''])

            changes_to_apply = False

            if state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True

            if state.get('''trim_offsets''', trim_offsets) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('''type'''))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''')

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''')

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask''']) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['''global_attention_mask'''])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side))

        return encoded_inputs
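# Illustrative usage sketch of LED's global attention (names assumed, not from this file):
#
#     tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tok(["a long document ..."], return_tensors="pt", padding=True)
#     enc["global_attention_mask"] = torch.zeros_like(enc["input_ids"])
#     enc["global_attention_mask"][:, 0] = 1  # global attention on the first token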
| 24 | 1 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    '''Two parallel Transformer2DModel blocks whose outputs are mixed, for dual text-encoder conditioning.'''

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = '''geglu''',
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ])

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 24 |
'''simple docstring'''
from collections import deque
def tarjan(g):
    '''Return the strongly connected components of the directed graph g (adjacency lists).'''
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    '''Build adjacency lists for n vertices from a list of (u, v) directed edges.'''
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
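# Note (illustrative, not in the original file): tarjan runs in O(V + E), since every vertex
# is pushed onto and popped from the stack exactly once. A minimal check on a 3-cycle:
#
#     assert tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)])) == [[2, 1, 0]]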
if __name__ == "__main__":
# Test
UpperCAmelCase_ : List[str] = 7
UpperCAmelCase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ : List[str] = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ : Tuple = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 24 | 1 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / '''file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''')
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / '''malformed_file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''')
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / '''csv_with_image.csv'''
    data = textwrap.dedent(
        f'''\
        image
        {image_file}
        ''')
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / '''csv_with_label.csv'''
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''')
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / '''csv_with_int_list.csv'''
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''')
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match='''Error tokenizing data'''):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records)
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding='''utf-8''') as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='''utf-8''', features=Features({'''image''': Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('''image''').type == Image()()
    generated_content = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
with open(_lowerCamelCase , encoding='''utf-8''' ) as f:
__snake_case = f.read().splitlines()[1:]
__snake_case = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
__snake_case = csv._generate_tables([[csv_file_with_label]] )
__snake_case = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
__snake_case = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(_lowerCamelCase ) for label in labels]
def _UpperCamelCase (_lowerCamelCase : Tuple )-> Any:
'''simple docstring'''
    __snake_case = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda _lowerCamelCase : [int(i) for i in x.split()]} )
__snake_case = csv._generate_tables([[csv_file_with_int_list]] )
__snake_case = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
__snake_case = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
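# Illustrative end-to-end sketch of the same casting behaviour through the
# public API (the data file name below is hypothetical):
#
#     from datasets import ClassLabel, Features, load_dataset
#
#     features = Features({"label": ClassLabel(names=["good", "bad"])})
#     ds = load_dataset("csv", data_files="labels.csv", features=features)
#     ds["train"]["label"]  # class indices, e.g. [0, 1, 0]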
| 24 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
        '''simple docstring'''
        # fmt: off
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
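# Minimal standalone sketch (illustrative) of loading the checkpoint exercised
# above; the expected encodings show that sequences are wrapped in <s> (id 0)
# and </s> (id 2):
#
#     from transformers import AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("moussaKam/mbarthez")
#     ids = tok("Le transformeur est un modèle d'apprentissage profond.").input_ids
#     assert ids[0] == 0 and ids[-1] == 2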
| 24 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _UpperCamelCase (_lowerCamelCase : Any )-> Any:
'''simple docstring'''
__snake_case = filter(lambda _lowerCamelCase : p.requires_grad , model.parameters() )
__snake_case = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__)
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : int )-> List[str]:
'''simple docstring'''
if metric == "rouge2":
__snake_case = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
__snake_case = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
__snake_case = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
__snake_case = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
''' function.''' )
__snake_case = ModelCheckpoint(
dirpath=_lowerCamelCase , filename=_lowerCamelCase , monitor=f'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : str )-> str:
'''simple docstring'''
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=_lowerCamelCase , verbose=_lowerCamelCase , )
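# Wiring sketch (illustrative; `get_checkpoint_callback` and
# `get_early_stopping_callback` are the assumed original names of the two
# factory helpers above):
#
#     checkpoint_cb = get_checkpoint_callback(output_dir, metric="rouge2")
#     early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
#     trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb])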
class lowerCAmelCase ( pl.Callback):
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = {F'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE )
@rank_zero_only
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True ) -> None:
'''simple docstring'''
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
__snake_case = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
__snake_case = Path(pl_module.hparams.output_dir )
if type_path == "test":
__snake_case = od / '''test_results.txt'''
__snake_case = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__snake_case = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
__snake_case = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
generations_file.parent.mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''a+''' ) as writer:
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key in ["log", "progress_bar", "preds"]:
continue
__snake_case = metrics[key]
if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ):
__snake_case = val.item()
__snake_case = F'''{key}: {val:.6f}\n'''
writer.write(__SCREAMING_SNAKE_CASE )
if not save_generations:
return
if "preds" in metrics:
__snake_case = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(__SCREAMING_SNAKE_CASE )
@rank_zero_only
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
try:
__snake_case = pl_module.model.model.num_parameters()
except AttributeError:
__snake_case = pl_module.model.num_parameters()
__snake_case = count_trainable_parameters(__SCREAMING_SNAKE_CASE )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''test''' )
@rank_zero_only
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
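# Standalone reproduction sketch (illustrative) of the masked-LM integration
# check above:
#
#     import jax.numpy as jnp
#     from transformers import FlaxRoFormerForMaskedLM
#
#     model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#     logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 50000)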
| 24 | 1 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCAmelCase_ : str = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = '''https://pypi.org/pypi/diffusers/json'''
__snake_case = json.loads(request.urlopen(_lowerCamelCase ).read() )['''releases'''].keys()
return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) )
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
__snake_case = Path(_lowerCamelCase ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def _UpperCamelCase (_lowerCamelCase : Union[str, os.PathLike] )-> Optional[int]:
'''simple docstring'''
init_hf_modules()
__snake_case = Path(_lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
__snake_case = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__snake_case = f.read()
# Imports of the form `import .xxx`
__snake_case = re.findall('''^\s*import\s+\.(\S+)\s*$''' , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , _lowerCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(_lowerCamelCase ) )
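# Worked example (illustrative): for a module file containing
#     from .pipeline_utils import DiffusionPipeline
#     from .schedulers import DDIMScheduler
# the function above returns ["pipeline_utils", "schedulers"] (order is not
# guaranteed, since the matches are de-duplicated through a set).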
def _UpperCamelCase (_lowerCamelCase : Dict )-> Optional[Any]:
'''simple docstring'''
__snake_case = False
__snake_case = [module_file]
__snake_case = []
# Let's recurse through all relative imports
while not no_change:
__snake_case = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowerCamelCase ) )
__snake_case = Path(_lowerCamelCase ).parent
__snake_case = [str(module_path / m ) for m in new_imports]
__snake_case = [f for f in new_import_files if f not in all_relative_imports]
__snake_case = [f'''{f}.py''' for f in new_import_files]
__snake_case = len(_lowerCamelCase ) == 0
all_relative_imports.extend(_lowerCamelCase )
return all_relative_imports
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> int:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__snake_case = f.read()
# Imports of the form `import xxx`
__snake_case = re.findall('''^\s*import\s+(\S+)\s*$''' , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('''^\s*from\s+(\S+)\s+import''' , _lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
__snake_case = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
__snake_case = list(set(_lowerCamelCase ) )
__snake_case = []
for imp in imports:
try:
importlib.import_module(_lowerCamelCase )
except ImportError:
missing_packages.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
f'''{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`''' )
return get_relative_imports(_lowerCamelCase )
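# Note (descriptive): the function above plays a double role: it raises if any
# top-level third-party import of the module is missing from the environment,
# and on success it returns the module's *relative* imports so the caller can
# copy those sibling files next to the module.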
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
__snake_case = module_path.replace(os.path.sep , '''.''' )
__snake_case = importlib.import_module(_lowerCamelCase )
if class_name is None:
return find_pipeline_class(_lowerCamelCase )
return getattr(_lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
from ..pipelines import DiffusionPipeline
__snake_case = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) )
__snake_case = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _lowerCamelCase )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
f''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
f''' {loaded_module}.''' )
__snake_case = cls
return pipeline_class
def _UpperCamelCase (_lowerCamelCase : Union[str, os.PathLike] , _lowerCamelCase : str , _lowerCamelCase : Optional[Union[str, os.PathLike]] = None , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[Dict[str, str]] = None , _lowerCamelCase : Optional[Union[bool, str]] = None , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : bool = False , )-> str:
'''simple docstring'''
__snake_case = str(_lowerCamelCase )
__snake_case = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
__snake_case = module_file_or_url
__snake_case = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
__snake_case = get_diffusers_versions()
# cut ".dev0"
__snake_case = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
__snake_case = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(f'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
__snake_case = f'''v{revision}'''
elif revision == "main":
__snake_case = revision
else:
raise ValueError(
f'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
f''' {", ".join(available_versions + ["main"] )}.''' )
# community pipeline on GitHub
__snake_case = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase )
try:
__snake_case = cached_download(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
__snake_case = '''git'''
__snake_case = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
__snake_case = hf_hub_download(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
__snake_case = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
__snake_case = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
__snake_case = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
__snake_case = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
__snake_case = f'''{module_needed}.py'''
shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case = use_auth_token
elif use_auth_token is True:
__snake_case = HfFolder.get_token()
else:
__snake_case = None
__snake_case = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
__snake_case = submodule_path / commit_hash
__snake_case = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase , f'''{module_needed}.py''' , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return os.path.join(_lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Union[str, os.PathLike] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[Union[str, os.PathLike]] = None , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[Dict[str, str]] = None , _lowerCamelCase : Optional[Union[bool, str]] = None , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : bool = False , **_lowerCamelCase : Optional[Any] , )-> Union[str, Any]:
'''simple docstring'''
__snake_case = get_cached_module_file(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return get_class_in_module(_lowerCamelCase , final_module.replace('''.py''' , '''''' ) )
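# Usage sketch (illustrative; the repository, file and class names below are
# hypothetical):
#
#     pipeline_cls = get_class_from_dynamic_module(
#         "my-org/my-community-pipeline",
#         module_file="pipeline.py",
#         class_name="MyPipeline",
#     )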
| 24 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('''RGB''' )
return image
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
__snake_case = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] )-> Tuple:
'''simple docstring'''
__snake_case = dct.pop(_lowerCamelCase )
__snake_case = val
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple )-> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__snake_case = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
__snake_case = qkv_bias
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Tuple )-> Dict:
'''simple docstring'''
__snake_case = 3_64 if '''coco''' in model_name else 2_24
__snake_case = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
__snake_case = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
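    # Example invocation (illustrative; the script file name is hypothetical):
    #     python convert_blip2_checkpoint.py --model_name blip2-opt-2.7b \
    #         --pytorch_dump_folder_path ./blip2-opt-2.7b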
| 24 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase_ : Tuple = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] )-> Optional[int]:
'''simple docstring'''
for attribute in key.split('''.''' ):
__snake_case = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
__snake_case = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
__snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__snake_case = value
elif weight_type == "weight_g":
__snake_case = value
elif weight_type == "weight_v":
__snake_case = value
elif weight_type == "bias":
__snake_case = value
elif weight_type == "running_mean":
__snake_case = value
elif weight_type == "running_var":
__snake_case = value
elif weight_type == "num_batches_tracked":
__snake_case = value
elif weight_type == "inv_freq":
__snake_case = value
else:
__snake_case = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : str )-> int:
'''simple docstring'''
__snake_case = []
__snake_case = fairseq_model.state_dict()
__snake_case = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__snake_case = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case = True
else:
for key, mapped_key in MAPPING.items():
__snake_case = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case = True
if "*" in mapped_key:
__snake_case = name.split(_lowerCamelCase )[0].split('''.''' )[-2]
__snake_case = mapped_key.replace('''*''' , _lowerCamelCase )
if "pos_bias_u" in name:
__snake_case = None
elif "pos_bias_v" in name:
__snake_case = None
elif "weight_g" in name:
__snake_case = '''weight_g'''
elif "weight_v" in name:
__snake_case = '''weight_v'''
elif "bias" in name:
__snake_case = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case = '''weight'''
elif "running_mean" in name:
__snake_case = '''running_mean'''
elif "inv_freq" in name:
__snake_case = '''inv_freq'''
elif "running_var" in name:
__snake_case = '''running_var'''
elif "num_batches_tracked" in name:
__snake_case = '''num_batches_tracked'''
else:
__snake_case = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] )-> Optional[Any]:
'''simple docstring'''
__snake_case = full_name.split('''conv_layers.''' )[-1]
__snake_case = name.split('''.''' )
__snake_case = int(items[0] )
__snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__snake_case = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__snake_case = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : str=True )-> Optional[Any]:
'''simple docstring'''
if config_path is not None:
__snake_case = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act='''swish''' )
else:
__snake_case = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__snake_case = '''rotary'''
if is_finetuned:
if dict_path:
__snake_case = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case = target_dict.pad_index
__snake_case = target_dict.bos_index
__snake_case = target_dict.eos_index
__snake_case = len(target_dict.symbols )
__snake_case = os.path.join(_lowerCamelCase , '''vocab.json''' )
if not os.path.isdir(_lowerCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
__snake_case = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case = 0
__snake_case = 1
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
__snake_case = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowerCamelCase , )
__snake_case = True if config.feat_extract_norm == '''layer''' else False
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
__snake_case = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
__snake_case = WavaVecaConformerForCTC(_lowerCamelCase )
else:
__snake_case = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case = argparse.Namespace(task='''audio_pretraining''' )
__snake_case = fairseq.tasks.setup_task(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
__snake_case = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase_ : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
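    # Example invocation (illustrative; the script file name is hypothetical):
    #     python convert_wav2vec2_conformer_checkpoint.py \
    #         --checkpoint_path ./model.pt --dict_path ./dict.ltr.txt \
    #         --pytorch_dump_folder_path ./wav2vec2-conformer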
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
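        # binarize at 0.5 (the intended logic here is presumably
        # `mask[mask < 0.5] = 0` and `mask[mask >= 0.5] = 1`)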
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
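# Usage sketch (illustrative; `RePaintPipeline` is the assumed original name of
# the class above):
#
#     from diffusers import RePaintPipeline
#
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     out = pipe(image=init_image, mask_image=mask_image, num_inference_steps=250)
#     out.images[0].save("inpainted.png")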
| 24 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('''RGB''' )
return image
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
__snake_case = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] )-> Tuple:
'''simple docstring'''
__snake_case = dct.pop(_lowerCamelCase )
__snake_case = val
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple )-> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__snake_case = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
__snake_case = qkv_bias
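# Note (added): the original BLIP-2 vision encoder keeps learnable q and v biases
# but pins the k bias at zero, so the fused qkv bias is rebuilt above as
# [q_bias, zeros_like(q_bias), v_bias] before it is loaded into the HF model.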
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Tuple )-> Dict:
'''simple docstring'''
__snake_case = 3_64 if '''coco''' in model_name else 2_24
__snake_case = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
__snake_case = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
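# Note (added): the language-model config above is picked by substring match on
# the checkpoint name (OPT 2.7b/6.7b vs. Flan-T5 xl/xxl), and COCO-finetuned
# variants use 364x364 inputs instead of the default 224x224.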
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda x : -x[0] )
]
return result
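# Minimal usage sketch (added; the checkpoint name is an assumption -- any
# CLIP-like zero-shot checkpoint works):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#
# which returns [{"score": ..., "label": ...}, ...] sorted by descending score,
# exactly what the postprocess above produces.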
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : str = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 1 |
'''simple docstring'''
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
__snake_case = OmegaConf.load(_lowerCamelCase )
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
__snake_case = {}
__snake_case = '''first_stage_model.'''
for key in keys:
if key.startswith(_lowerCamelCase ):
__snake_case = state_dict[key]
# extract state_dict for UNetLDM
__snake_case = {}
__snake_case = '''model.diffusion_model.'''
for key in keys:
if key.startswith(_lowerCamelCase ):
__snake_case = state_dict[key]
__snake_case = config.model.params.first_stage_config.params
__snake_case = config.model.params.unet_config.params
__snake_case = VQModel(**_lowerCamelCase ).eval()
vqvae.load_state_dict(_lowerCamelCase )
__snake_case = UNetLDMModel(**_lowerCamelCase ).eval()
unet.load_state_dict(_lowerCamelCase )
__snake_case = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=_lowerCamelCase , )
__snake_case = LDMPipeline(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
pipeline.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
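# Usage sketch (added; the script and file names are placeholders):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm-pipeline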
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
__snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) )
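# Quick check (added, illustrative): all three variants agree -- each one maps
# 12345 (and, thanks to abs(), -12345) to 1 + 2 + 3 + 4 + 5 == 15.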
def _UpperCamelCase ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase : Callable , _lowerCamelCase : int ) -> None:
__snake_case = f'''{func.__name__}({value})'''
__snake_case = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
print(f'''{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
__snake_case = random()
__snake_case = None
__snake_case = None
def __repr__( self ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{F'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
'''simple docstring'''
__snake_case = str(self.value ) + ''' '''
__snake_case = str(self.left or '''''' )
__snake_case = str(self.right or '''''' )
return value + left + right
def _UpperCamelCase (_lowerCamelCase : Node | None , _lowerCamelCase : int )-> tuple[Node | None, Node | None]:
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
__snake_case , __snake_case = split(root.left , _lowerCamelCase )
return left, root
else:
__snake_case , __snake_case = split(root.right , _lowerCamelCase )
return root, right
def _UpperCamelCase (_lowerCamelCase : Node | None , _lowerCamelCase : Node | None )-> Node | None:
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
__snake_case = merge(left.right , _lowerCamelCase )
return left
else:
__snake_case = merge(_lowerCamelCase , right.left )
return right
def _UpperCamelCase (_lowerCamelCase : Node | None , _lowerCamelCase : int )-> Node | None:
'''simple docstring'''
__snake_case = Node(_lowerCamelCase )
__snake_case , __snake_case = split(_lowerCamelCase , _lowerCamelCase )
return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Node | None , _lowerCamelCase : int )-> Node | None:
'''simple docstring'''
__snake_case , __snake_case = split(_lowerCamelCase , value - 1 )
__snake_case , __snake_case = split(_lowerCamelCase , _lowerCamelCase )
return merge(_lowerCamelCase , _lowerCamelCase )
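# Note (added): insert splits the treap at `value` and merges left + node + right,
# preserving the heap property on `prior`; erase uses two splits (at value - 1
# and at value) to isolate the equal-value nodes and merges the outer pieces.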
def _UpperCamelCase (_lowerCamelCase : Node | None )-> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def _UpperCamelCase (_lowerCamelCase : Node | None , _lowerCamelCase : str )-> Node | None:
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
__snake_case = insert(_lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
__snake_case = erase(_lowerCamelCase , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def _UpperCamelCase ()-> None:
'''simple docstring'''
__snake_case = None
print(
'''enter numbers to create a tree, + value to add value into treap, '''
'''- value to erase all nodes with value. \'q\' to quit. ''' )
__snake_case = input()
while args != "q":
__snake_case = interact_treap(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
__snake_case = input()
    print('''goodbye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
                while len(_lowerCamelCase ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
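# Worked example (added, illustrative): for the infix string "a+b*c^d" the
# postfix printed above is "abcd^*+", and infix_2_prefix mirrors it into the
# prefix form "+a*b^cd".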
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 24 | 1 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
UpperCAmelCase_ : Tuple = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
UpperCAmelCase_ : Optional[Any] = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
UpperCAmelCase_ : int = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] )-> str:
'''simple docstring'''
return float((preds == labels).mean() )
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : List[str] )-> Tuple:
'''simple docstring'''
__snake_case = simple_accuracy(_lowerCamelCase , _lowerCamelCase )
__snake_case = float(fa_score(y_true=_lowerCamelCase , y_pred=_lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = np.array(_lowerCamelCase )
__snake_case = np.array(_lowerCamelCase )
__snake_case = en_sentvecs.shape[0]
# mean centering
__snake_case = en_sentvecs - np.mean(_lowerCamelCase , axis=0 )
__snake_case = in_sentvecs - np.mean(_lowerCamelCase , axis=0 )
__snake_case = cdist(_lowerCamelCase , _lowerCamelCase , '''cosine''' )
__snake_case = np.array(range(_lowerCamelCase ) )
__snake_case = sim.argsort(axis=1 )[:, :10]
__snake_case = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
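# Note (added): for each English sentence vector the 10 nearest candidates by
# cosine distance are retrieved; precision@10 is the fraction of rows whose
# true (same-index) translation appears among those 10.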
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase ( datasets.Metric):
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 24 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 24 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
__snake_case = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE ) , x.transpose() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE ) , transpose(__SCREAMING_SNAKE_CASE ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE ) , transpose(__SCREAMING_SNAKE_CASE ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE ) , np.asarray(transpose(__SCREAMING_SNAKE_CASE ) ) ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , np.asarray(transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) ) ) )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) , np.reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) ) )
__snake_case = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) , np.reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) ) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(__SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) , reshape(__SCREAMING_SNAKE_CASE , (12, 5) ).numpy() ) )
@require_tf
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(__SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) , reshape(__SCREAMING_SNAKE_CASE , (12, 5) ).numpy() ) )
@require_flax
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) , np.asarray(reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) ) ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) , np.asarray(reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) ) ) )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE ) , np.squeeze(__SCREAMING_SNAKE_CASE ) ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) , np.squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) ) )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(1 , 3 , 4 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE ) , squeeze(__SCREAMING_SNAKE_CASE ).numpy() ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(__SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_tf
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = np.random.randn(1 , 3 , 4 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE ) , squeeze(__SCREAMING_SNAKE_CASE ).numpy() ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(__SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_flax
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(1 , 3 , 4 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE ) , np.asarray(squeeze(__SCREAMING_SNAKE_CASE ) ) ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) , np.asarray(squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) ) ) )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) , np.expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) ) )
@require_torch
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_tf
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_flax
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) , np.asarray(expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) ) ) )
| 24 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
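# Examples (added, illustrative): format_time(3661) returns "1:01:01" and
# format_time(75) returns "01:15" -- the hour field is dropped when it is zero.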
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
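        # Note (added): wait_for adapts the refresh cadence to the measured speed,
        # so the bar is redrawn at most roughly once per `update_every` seconds.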
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
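# Note (added): this is Project Euler 129 -- A(n) is the length of the smallest
# repunit divisible by n, and A(n) <= n always holds, so any n with A(n) > limit
# must itself exceed the limit; the search therefore starts near `limit` and
# walks odd candidates upward.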
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
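# The module is served through _LazyModule, so heavy submodules such as modeling_roberta are
# only imported the first time one of their attributes (e.g. RobertaModel) is actually accessed.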
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
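# Row ids take the form f"{partition_id}_{row_index}", matching what the assertions below expect.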
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()  # list.reverse() works in place; the return value is unused
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : List[str] = XGLMTokenizer
__lowercase : Union[str, Any] = XGLMTokenizerFast
__lowercase : Optional[int] = True
__lowercase : List[str] = True
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case = XGLMTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 1008 )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = XGLMTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name )
__snake_case = XGLMTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE )
__snake_case = pickle.dumps(__SCREAMING_SNAKE_CASE )
pickle.loads(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = '''Hello World!'''
__snake_case = [2, 3_1227, 4447, 35]
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
__snake_case = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = {
'''input_ids''': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''facebook/xglm-564M''' , padding=__SCREAMING_SNAKE_CASE , )
| 24 |
'''simple docstring'''
def sum_of_series (first_term : int , common_diff : int , num_of_terms : int )-> float:
    '''simple docstring'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series
    return total
def main ()-> None:
    '''simple docstring'''
    print(sum_of_series(1 , 1 , 10 ) )
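# sum_of_series(1, 1, 10) evaluates to 55.0, the 10th triangular number, which makes
# a convenient sanity check for the closed-form formula above.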
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase_ : List[Any] = sys.version_info >= (3, 1_0)
def list_field (default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
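# Usage sketch: inside a @dataclass, `foo: List[int] = list_field(default=[1, 2, 3])`
# hands every instance its own fresh list via default_factory instead of a shared mutable default.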
@dataclass
class lowerCAmelCase :
__lowercase : int
__lowercase : float
__lowercase : str
__lowercase : bool
@dataclass
class lowerCAmelCase :
__lowercase : int = 42
__lowercase : str = field(default='''toto''' , metadata={'''help''': '''help message'''})
@dataclass
class lowerCAmelCase :
__lowercase : bool = False
__lowercase : bool = True
__lowercase : Optional[bool] = None
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Tuple = '''titi'''
__lowercase : Any = '''toto'''
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Union[str, Any] = '''titi'''
__lowercase : Dict = '''toto'''
__lowercase : str = 42
@dataclass
class lowerCAmelCase :
__lowercase : BasicEnum = "toto"
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = BasicEnum(self.foo )
@dataclass
class lowerCAmelCase :
__lowercase : MixedTypeEnum = "toto"
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = MixedTypeEnum(self.foo )
@dataclass
class lowerCAmelCase :
__lowercase : Optional[int] = None
__lowercase : Optional[float] = field(default=__lowerCAmelCase , metadata={'''help''': '''help message'''})
__lowercase : Optional[str] = None
__lowercase : Optional[List[str]] = list_field(default=[])
__lowercase : Optional[List[int]] = list_field(default=[])
@dataclass
class lowerCAmelCase :
__lowercase : List[int] = list_field(default=[])
__lowercase : List[int] = list_field(default=[1, 2, 3])
__lowercase : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''])
__lowercase : List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class lowerCAmelCase :
__lowercase : List[int] = field()
__lowercase : str = field()
__lowercase : BasicEnum = field()
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = BasicEnum(self.required_enum )
@dataclass
class lowerCAmelCase :
__lowercase : int
__lowercase : "BasicEnum" = field()
__lowercase : "Optional[bool]" = None
__lowercase : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''})
__lowercase : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''])
if is_python_no_less_than_3_10:
@dataclass
class lowerCAmelCase :
__lowercase : bool = False
__lowercase : bool = True
__lowercase : bool | None = None
@dataclass
class lowerCAmelCase :
__lowercase : int | None = None
__lowercase : float | None = field(default=__lowerCAmelCase , metadata={'''help''': '''help message'''})
__lowercase : str | None = None
__lowercase : list[str] | None = list_field(default=[])
__lowercase : list[int] | None = list_field(default=[])
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__snake_case = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != '''container'''}
__snake_case = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , __SCREAMING_SNAKE_CASE ) and yy.get('''choices''' , __SCREAMING_SNAKE_CASE ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](__SCREAMING_SNAKE_CASE ) , yy['''type'''](__SCREAMING_SNAKE_CASE ) )
del xx["type"], yy["type"]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--bar''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--baz''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--flag''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((__snake_case) , ) = parser.parse_args_into_dataclasses(__SCREAMING_SNAKE_CASE , look_for_args_file=__SCREAMING_SNAKE_CASE )
self.assertFalse(example.flag )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--baz''' , default='''toto''' , type=__SCREAMING_SNAKE_CASE , help='''help message''' )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' )
expected.add_argument('''--baz''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=__SCREAMING_SNAKE_CASE , dest='''baz''' )
expected.add_argument('''--opt''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
__snake_case = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__SCREAMING_SNAKE_CASE )
for dataclass_type in dataclass_types:
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = parser.parse_args([] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
__snake_case = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
__snake_case = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
__snake_case = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
__snake_case = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__snake_case = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__snake_case = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__snake_case = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__snake_case = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
__snake_case = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
@dataclass
class lowerCAmelCase :
__lowercase : Literal["titi", "toto", 42] = "toto"
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__snake_case = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__snake_case = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = parser.parse_args([] )
self.assertEqual(
__SCREAMING_SNAKE_CASE , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
__snake_case = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--bar''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help='''help message''' )
expected.add_argument('''--baz''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE )
__snake_case = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__SCREAMING_SNAKE_CASE )
for dataclass_type in dataclass_types:
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = parser.parse_args([] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , bar=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , ces=[] , des=[] ) )
__snake_case = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--required_str''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__SCREAMING_SNAKE_CASE , )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__SCREAMING_SNAKE_CASE , )
expected.add_argument('''--opt''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
expected.add_argument('''--baz''' , default='''toto''' , type=__SCREAMING_SNAKE_CASE , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
__snake_case = parser.parse_dict(__SCREAMING_SNAKE_CASE )[0]
__snake_case = BasicExample(**__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(__SCREAMING_SNAKE_CASE , parser.parse_dict , __SCREAMING_SNAKE_CASE , allow_extra_keys=__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , '''temp_json''' )
os.mkdir(__SCREAMING_SNAKE_CASE )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
__snake_case = BasicExample(**__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(__SCREAMING_SNAKE_CASE , '''temp_yaml''' )
os.mkdir(__SCREAMING_SNAKE_CASE )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
__snake_case = BasicExample(**__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = HfArgumentParser(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
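# Usage sketch (illustrative, mirroring the flags these tests pass):
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(["--foo", "1", "--baz", "quux", "--bar", "0.5"])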
| 24 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env (key , default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCAmelCase_ : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('''test requires faiss''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[str]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('''test requires regex''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__snake_case = unittest.skip('''test requires PyTorch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
if not config.TF_AVAILABLE:
__snake_case = unittest.skip('''test requires TensorFlow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
__snake_case = unittest.skip('''test requires JAX''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
if not config.PIL_AVAILABLE:
__snake_case = unittest.skip('''test requires Pillow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Tuple:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> str:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Dict:
'''simple docstring'''
def _require_spacy_model(_lowerCamelCase : int ):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowerCamelCase ) )(_lowerCamelCase )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase (_lowerCamelCase : str )-> Dict:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case = unittest.skip('''test is slow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> Optional[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__snake_case = unittest.skip('''test is local''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : str )-> int:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case = unittest.skip('''test is packaged''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case = unittest.skip('''test requires remote''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (*_lowerCamelCase : str )-> Optional[int]:
'''simple docstring'''
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
__snake_case = decorator(_lowerCamelCase )
setattr(cls , _lowerCamelCase , _lowerCamelCase )
return cls
return decorate
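# Usage sketch (the factory above; `for_all_test_methods` is a hypothetical name for it):
#   @for_all_test_methods(require_faiss, slow)
#   class IndexTests(unittest.TestCase): ...
# applies every listed decorator to each method whose name starts with "test".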
class RequestWouldHangIndefinitelyError ( Exception):
    pass
class OfflineSimulationMode ( Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline (mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-16 ):
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = '''https://10.255.255.1'''
        if kwargs.get('''timeout''' ) is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        kwargs['''timeout'''] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError('''Offline mode is enabled.''' , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''' , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''' , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''' , True ):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
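# Usage sketch: run a block as if the network were unreachable, e.g.
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       requests.Session().get("https://example.com")  # raises requests.ConnectionError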
@contextmanager
def set_current_working_directory_to_temp_dir (*args , **kwargs ):
    '''simple docstring'''
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int )-> Any:
'''simple docstring'''
return deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : int , *_lowerCamelCase : int , **_lowerCamelCase : Optional[int] ):
try:
return func(*_lowerCamelCase , **_lowerCamelCase )
except HTTPError as err:
if str(_lowerCamelCase ).startswith('''500''' ) or str(_lowerCamelCase ).startswith('''502''' ):
pytest.xfail(str(_lowerCamelCase ) )
raise err
return decorator.decorator(_wrapper , _lowerCamelCase )
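# Turns transient Hub 500/502 HTTP errors into pytest.xfail so flaky server hiccups
# do not fail the wrapped test; any other HTTPError is re-raised.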
class _RunOutput :
    def __init__( self , returncode , stdout , stderr ):
        '''simple docstring'''
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream (stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess (cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False )-> _RunOutput:
    '''simple docstring'''
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee (line , sink , pipe , label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='''stdout:''' ) ),
            _read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='''stderr:''' ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async (cmd , env=None , stdin=None , timeout=1_80 , quiet=False , echo=True )-> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
    return result
def pytest_xdist_worker_id ()-> int:
    '''simple docstring'''
    worker = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
    worker = re.sub(R'''^gw''' , '''''' , worker , 0 , re.M )
    return int(worker )
def get_torch_dist_unique_port ()-> int:
    '''simple docstring'''
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
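# e.g. pytest-xdist worker "gw3" yields 29500 + 3 == 29503, giving each parallel
# worker its own distributed-training port (illustrative arithmetic).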
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : Dict = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
'''simple docstring'''
def partition (m : int )-> int:
    '''simple docstring'''
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
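# e.g. partition(4) == 5, counting 4, 3+1, 2+2, 2+1+1 and 1+1+1+1.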
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
            print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
            print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
'''simple docstring'''
import argparse
import os
import re
UpperCAmelCase_ : List[str] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase_ : Tuple = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCAmelCase_ : Dict = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : bool = False )-> str:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__snake_case = f.read()
__snake_case = content.split('''\n''' )
__snake_case = []
__snake_case = 0
while line_idx < len(_lowerCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__snake_case = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__snake_case = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__snake_case = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : _re_identifier.search(_lowerCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(_lowerCamelCase ) )
elif "\n".join(_lowerCamelCase ) != content:
return True
def _UpperCamelCase (_lowerCamelCase : bool = False )-> Tuple:
'''simple docstring'''
__snake_case = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for f in os.listdir(_lowerCamelCase ) if f.endswith('''.py''' )]
__snake_case = [sort_auto_mapping(_lowerCamelCase , overwrite=_lowerCamelCase ) for fname in fnames]
if not overwrite and any(_lowerCamelCase ):
__snake_case = [f for f, d in zip(_lowerCamelCase , _lowerCamelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_lowerCamelCase )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 24 | 1 |
'''simple docstring'''
import os
SYMBOLS = {'''I''': 1, '''V''': 5, '''X''': 1_0, '''L''': 5_0, '''C''': 1_0_0, '''D''': 5_0_0, '''M''': 1_0_0_0}
def parse_roman_numerals (numerals : str )-> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals (num : int )-> str:
    '''simple docstring'''
    numerals = ''''''
    m_count = num // 10_00
numerals += m_count * "M"
num %= 10_00
    c_count = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
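# e.g. parse_roman_numerals("XXXXVIIII") == 49 while generate_roman_numerals(49) == "XLIX",
# a saving of five characters; solution() sums such savings over every line of the input file.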
def solution (roman_numerals_filename : str = "/p089_roman.txt" )-> int:
    '''simple docstring'''
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
    return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate (*args , take_from : Optional[Union[Dict, Any]] = None , standard_warn=True , stacklevel=2 ):
    '''simple docstring'''
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 24 | 1 |
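Below is a hypothetical usage sketch of a deprecate()-style helper like the one above. The names (resize, the width/height kwargs) are invented for illustration and the helper is heavily simplified; the real diffusers function also handles attribute deprecation and version gating.

import warnings

def deprecate(attribute, version_name, message, take_from=None, standard_warn=True):
    # Pop a deprecated kwarg from `take_from`, emit a FutureWarning, return its value.
    kwargs = take_from if isinstance(take_from, dict) else {}
    value = None
    if attribute in kwargs:
        value = kwargs.pop(attribute)
        warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        warnings.warn((warning + " " if standard_warn else "") + message, FutureWarning, stacklevel=2)
    return value

def resize(image, size=None, **kwargs):
    # Accept the old width/height spelling for one more release cycle.
    width = deprecate("width", "1.0.0", "Pass `size=(w, h)` instead.", take_from=kwargs)
    height = deprecate("height", "1.0.0", "Pass `size=(w, h)` instead.", take_from=kwargs)
    if width is not None and height is not None:
        size = (width, height)
    return size

print(resize(object(), width=640, height=480))  # warns twice, prints (640, 480)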
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowerCAmelCase ( nn.Module):
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
__snake_case = nn.Linear(3 , 4 )
__snake_case = nn.BatchNormad(4 )
__snake_case = nn.Linear(4 , 5 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__SCREAMING_SNAKE_CASE ) ) )
class lowerCAmelCase ( __lowerCAmelCase):
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class lowerCAmelCase ( __lowerCAmelCase):
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
return output + 1
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = ModelHook()
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(test_model._hf_hook , __SCREAMING_SNAKE_CASE )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''_old_forward''' ) )
        # Check that adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__SCREAMING_SNAKE_CASE )
self.assertFalse(hasattr(__SCREAMING_SNAKE_CASE , '''_hf_hook''' ) )
self.assertFalse(hasattr(__SCREAMING_SNAKE_CASE , '''_old_forward''' ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = ModelHook()
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , append=__SCREAMING_SNAKE_CASE )
self.assertEqual(isinstance(test_model._hf_hook , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''_old_forward''' ) )
        # Check that adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__SCREAMING_SNAKE_CASE )
self.assertFalse(hasattr(__SCREAMING_SNAKE_CASE , '''_hf_hook''' ) )
self.assertFalse(hasattr(__SCREAMING_SNAKE_CASE , '''_old_forward''' ) )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = torch.randn(2 , 3 )
__snake_case = test_model(x + 1 )
__snake_case = test_model(x + 2 )
__snake_case = PreForwardHook()
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__snake_case = PreForwardHook()
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-5 )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = torch.randn(2 , 3 )
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
__snake_case = PostForwardHook()
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__snake_case = PostForwardHook()
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
assert torch.allclose(__SCREAMING_SNAKE_CASE , output + 2 , atol=1E-5 )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = torch.randn(2 , 3 )
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
__snake_case = PostForwardHook()
add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case = True
__snake_case = test_model(__SCREAMING_SNAKE_CASE )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        # This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case = torch.randn(2 , 3 )
__snake_case = model(__SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(__SCREAMING_SNAKE_CASE , AlignDevicesHook(io_same_device=__SCREAMING_SNAKE_CASE ) )
__snake_case = torch.randn(2 , 3 ).to(0 )
__snake_case = model(__SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , torch.device(0 ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        # This will move each submodule to a different device
__snake_case = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , __SCREAMING_SNAKE_CASE )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(__SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , __SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
__snake_case = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(__SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , __SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        # This will move each submodule to a different device
__snake_case = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(__SCREAMING_SNAKE_CASE , execution_device=__SCREAMING_SNAKE_CASE , offload=__SCREAMING_SNAKE_CASE )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case = torch.device(__SCREAMING_SNAKE_CASE )
self.assertEqual(model.batchnorm.running_mean.device , __SCREAMING_SNAKE_CASE )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(__SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , __SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__SCREAMING_SNAKE_CASE )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(__SCREAMING_SNAKE_CASE , execution_device=__SCREAMING_SNAKE_CASE , offload=__SCREAMING_SNAKE_CASE , offload_buffers=__SCREAMING_SNAKE_CASE )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(__SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , __SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__SCREAMING_SNAKE_CASE )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
        # This will move each submodule to a different device
__snake_case = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
__SCREAMING_SNAKE_CASE , execution_device=__SCREAMING_SNAKE_CASE , offload=__SCREAMING_SNAKE_CASE , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case = torch.device(__SCREAMING_SNAKE_CASE )
self.assertEqual(model.batchnorm.running_mean.device , __SCREAMING_SNAKE_CASE )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(__SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , __SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__SCREAMING_SNAKE_CASE )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__SCREAMING_SNAKE_CASE , execution_device=__SCREAMING_SNAKE_CASE , offload=__SCREAMING_SNAKE_CASE , weights_map=model.state_dict() , offload_buffers=__SCREAMING_SNAKE_CASE , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(__SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , __SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__SCREAMING_SNAKE_CASE )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 24 |
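The tests above revolve around one trick: stash a module's original forward and shadow it with a wrapper that routes calls through the hook's pre_forward and post_forward. Here is a minimal pure-PyTorch sketch of that mechanism, under the simplifying assumption that we do not need to preserve the wrapped signature or manage device placement, both of which accelerate's real implementation handles.

import torch
import torch.nn as nn

class AddOneToInputHook:
    # pre_forward may rewrite args/kwargs; post_forward may rewrite the output.
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs

    def post_forward(self, module, output):
        return output

def add_hook_to_module(module: nn.Module, hook) -> nn.Module:
    # Stash the original forward, then shadow it with a hooked wrapper.
    module._old_forward = module.forward

    def new_forward(*args, **kwargs):
        args, kwargs = hook.pre_forward(module, *args, **kwargs)
        output = module._old_forward(*args, **kwargs)
        return hook.post_forward(module, output)

    module.forward = new_forward
    return module

linear = nn.Linear(3, 3)
x = torch.randn(2, 3)
expected = linear(x + 1)
add_hook_to_module(linear, AddOneToInputHook())
assert torch.allclose(linear(x), expected, atol=1e-5)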
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__snake_case = config.depths[-1] - config.num_metaad_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 1 |
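The conversion script above is essentially a key-renaming pass over a state dict followed by a logit check. A generic sketch of that pattern follows; the rename rules here are invented for illustration and are not EfficientFormer's actual mapping.

import re
import torch

def rename_key(old_name: str) -> str:
    # Hypothetical rename rules for illustration; the real EfficientFormer
    # mapping (above) is considerably more involved.
    new_name = old_name.replace("patch_embed", "patch_embeddings")
    new_name = re.sub(r"\bfc(\d)\b", r"linear\1", new_name)
    return new_name

def convert_state_dict(checkpoint: dict) -> dict:
    # Same pattern as convert_torch_checkpoint above: pop each key, re-insert
    # it under its new name, and keep the tensor untouched.
    return {rename_key(k): v for k, v in checkpoint.items()}

old = {"patch_embed.weight": torch.zeros(4, 3), "fc1.bias": torch.zeros(4)}
print(sorted(convert_state_dict(old)))  # ['linear1.bias', 'patch_embeddings.weight']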
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda x : -x[0] )
]
return result
| 24 |
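For completeness, this is how the pipeline defined above is typically invoked through transformers.pipeline. The checkpoint name is only an example of a CLIP-style model, and the first call downloads weights.

from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",  # illustrative CLIP-style checkpoint
)
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
)
print(predictions[0]["label"], round(predictions[0]["score"], 3))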
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ) -> Tuple:
'''simple docstring'''
__snake_case = size if size is not None else {'''shortest_edge''': 20}
__snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_flip_channel_order''' ) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 1 |
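A small NumPy sketch of the two MobileViT-specific steps those tests exercise: center-cropping and do_flip_channel_order, which swaps RGB to BGR (the layout the original MobileViT training pipeline reportedly used). The channels-first layout and sizes are assumptions chosen to match the test expectations above.

import numpy as np

def flip_channel_order(image: np.ndarray) -> np.ndarray:
    # image is channels-first (C, H, W); reverse the channel axis (RGB -> BGR).
    return image[::-1, ...]

def center_crop(image: np.ndarray, size: int) -> np.ndarray:
    _, h, w = image.shape
    top, left = (h - size) // 2, (w - size) // 2
    return image[:, top : top + size, left : left + size]

rgb = np.random.randint(0, 256, (3, 20, 20), dtype=np.uint8)
out = flip_channel_order(center_crop(rgb, 18))
assert out.shape == (3, 18, 18)
assert np.array_equal(out[0], center_crop(rgb, 18)[2])  # R and B channels swapped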
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCAmelCase_ : Optional[Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Optional[int] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
__snake_case = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__snake_case = bs[:]
__snake_case = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
__snake_case = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def _UpperCamelCase (_lowerCamelCase : Any )-> Dict:
'''simple docstring'''
__snake_case = set()
__snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__snake_case = char
return pairs
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Any = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) -> List[str]:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else bos_token
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else eos_token
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else sep_token
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cls_token
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else unk_token
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle:
__snake_case = json.load(__SCREAMING_SNAKE_CASE )
__snake_case = {v: k for k, v in self.encoder.items()}
__snake_case = errors # how to handle errors in decoding
__snake_case = bytes_to_unicode()
__snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle:
__snake_case = merges_handle.read().split('''\n''' )[1:-1]
__snake_case = [tuple(merge.split() ) for merge in bpe_merges]
__snake_case = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__snake_case = {}
__snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__snake_case = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__snake_case = tuple(__SCREAMING_SNAKE_CASE )
__snake_case = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
__snake_case = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__snake_case , __snake_case = bigram
__snake_case = []
__snake_case = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
__snake_case = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__snake_case = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__snake_case = tuple(__SCREAMING_SNAKE_CASE )
__snake_case = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
__snake_case = get_pairs(__SCREAMING_SNAKE_CASE )
__snake_case = ''' '''.join(__SCREAMING_SNAKE_CASE )
__snake_case = word
return word
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__snake_case = []
for token in re.findall(self.pat , __SCREAMING_SNAKE_CASE ):
__snake_case = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) )
return bpe_tokens
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return self.decoder.get(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
__snake_case = ''''''.join(__SCREAMING_SNAKE_CASE )
__snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + '''\n''' )
__snake_case = 0
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
__snake_case = token_index
writer.write(''' '''.join(__SCREAMING_SNAKE_CASE ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__SCREAMING_SNAKE_CASE ) > 0 and not text[0].isspace()):
__snake_case = ''' ''' + text
return (text, kwargs)
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''simple docstring'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 24 |
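A tiny, self-contained illustration of the BPE loop implemented in the bpe() method above: repeatedly merge the highest-ranked adjacent symbol pair until no known merge applies. The toy ranks table is invented for the example.

def get_pairs(word):
    # All adjacent symbol pairs in the current segmentation.
    return {(a, b) for a, b in zip(word, word[1:])}

def bpe(token: str, ranks: dict) -> str:
    word = tuple(token)
    while True:
        pairs = get_pairs(word)
        bigram = min(pairs, key=lambda p: ranks.get(p, float("inf")), default=None)
        if bigram is None or bigram not in ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

ranks = {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2}
print(bpe("hello", ranks))  # "hell o"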
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "arrow" , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = load_from_cache_file
__snake_case = file_format
__snake_case = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 24 | 1 |
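Hypothetical end-to-end usage of the reader above through its public entry point, Dataset.from_spark in recent datasets releases. It needs a live Spark session, so treat this as a sketch rather than a drop-in test.

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])

ds = Dataset.from_spark(df)  # materializes the DataFrame into an Arrow-backed dataset
print(ds[0])  # e.g. {'text': 'hello'} (row order follows the DataFrame partitions)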
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Any = '''roberta'''
def __init__( self , __SCREAMING_SNAKE_CASE=5_0265 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class lowerCAmelCase ( __lowerCAmelCase):
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__snake_case = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__snake_case = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 24 |
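A quick demonstration of the configuration class above. The printed defaults follow the constructor signature shown (base-sized RoBERTa); exact values could differ across transformers versions.

from transformers import RobertaConfig

config = RobertaConfig()  # base-sized defaults, matching the signature above
print(config.model_type)                              # roberta
print(config.hidden_size, config.num_hidden_layers)   # 768 12

small = RobertaConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(small.num_hidden_layers)                        # 4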
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = LEDTokenizer
__lowercase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**__SCREAMING_SNAKE_CASE )
__snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
__snake_case = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__snake_case = value
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''simple docstring'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
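# Illustrative sketch (standalone; not the tokenizer's public API, whose class
# name is obfuscated in this dump): how the `_pad` override above extends
# `global_attention_mask` on the padding side. `-1` marks padded positions,
# while `0` means local attention and `1` means global attention.
def _pad_global_attention_mask(global_mask, target_length, padding_side="right"):
    difference = target_length - len(global_mask)
    if padding_side == "right":
        return global_mask + [-1] * difference
    return [-1] * difference + global_mask

# Example: global attention on the first token, sequence padded from 4 to 6.
assert _pad_global_attention_mask([1, 0, 0, 0], 6) == [1, 0, 0, 0, -1, -1]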
| 24 | 1 |
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class lowerCAmelCase ( __lowerCAmelCase):
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Optional[Any]:
'''simple docstring'''
__snake_case = max_length
__snake_case = max_position_embeddings
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
__snake_case = input_ids.shape[-1]
__snake_case = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
'''with `max_length = start_length + max_new_tokens` instead.''' , __SCREAMING_SNAKE_CASE , )
__snake_case = start_length
__snake_case = max_new_tokens
__snake_case = start_length + max_new_tokens
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[str]:
'''simple docstring'''
__snake_case = max_time
__snake_case = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class lowerCAmelCase ( __lowerCAmelCase):
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
return any(criteria(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for criteria in self )
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
for stopping_criterium in self:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return stopping_criterium.max_length
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return stopping_criterium.max_length
return None
def _UpperCamelCase (_lowerCamelCase : StoppingCriteriaList , _lowerCamelCase : int )-> StoppingCriteriaList:
'''simple docstring'''
__snake_case = stopping_criteria.max_length
__snake_case = deepcopy(_lowerCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , _lowerCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) )
return new_stopping_criteria
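# Illustrative sketch (standalone; plain callables stand in for the criteria
# classes above, whose public names are obfuscated in this dump): generation
# stops as soon as *any* criterion fires, mirroring the list's `__call__`.
# It relies only on the `time` module already imported at the top of this file.
def _length_exceeded(num_tokens, max_length=20):
    return num_tokens >= max_length

_start_time = time.time()

def _time_exceeded(num_tokens, max_time=2.0):
    return time.time() - _start_time > max_time

_criteria = [_length_exceeded, _time_exceeded]
_num_tokens = 0
while not any(criterion(_num_tokens) for criterion in _criteria):
    _num_tokens += 1  # stands in for generating one more token
assert _num_tokens == 20  # the length criterion fired first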
| 24 |
'''simple docstring'''
from collections import deque
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
__snake_case = len(_lowerCamelCase )
__snake_case = deque()
__snake_case = [False for _ in range(_lowerCamelCase )]
__snake_case = [-1 for _ in range(_lowerCamelCase )]
__snake_case = index_of[:]
def strong_connect(_lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ):
__snake_case = index # the number when this node is seen
__snake_case = index # lowest rank node reachable from here
index += 1
stack.append(_lowerCamelCase )
__snake_case = True
for w in g[v]:
if index_of[w] == -1:
__snake_case = strong_connect(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__snake_case = []
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
while w != v:
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
components.append(_lowerCamelCase )
return index
__snake_case = []
for v in range(_lowerCamelCase ):
if index_of[v] == -1:
strong_connect(_lowerCamelCase , 0 , _lowerCamelCase )
return components
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = [[] for _ in range(_lowerCamelCase )]
for u, v in edges:
g[u].append(_lowerCamelCase )
return g
if __name__ == "__main__":
# Test
UpperCAmelCase_ : List[str] = 7
UpperCAmelCase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ : List[str] = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ : Tuple = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
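    # Additional worked example (same `create_graph`/`tarjan` call convention as
    # above): two 2-cycles joined by a one-way edge, 0<->1 -> 2<->3, yield two
    # components; the sink component {2, 3} is completed and emitted first.
    extra_graph = create_graph(4, [(0, 1), (1, 0), (1, 2), (2, 3), (3, 2)])
    assert [[3, 2], [1, 0]] == tarjan(extra_graph)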
| 24 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : jnp.ndarray
@flax_register_to_config
class lowerCAmelCase ( nn.Module , __lowerCAmelCase , __lowerCAmelCase):
__lowercase : int = 32
__lowercase : int = 4
__lowercase : int = 4
__lowercase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__lowercase : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__lowercase : Union[bool, Tuple[bool]] = False
__lowercase : Tuple[int] = (320, 640, 1280, 1280)
__lowercase : int = 2
__lowercase : Union[int, Tuple[int]] = 8
__lowercase : Optional[Union[int, Tuple[int]]] = None
__lowercase : int = 1280
__lowercase : float = 0.0
__lowercase : bool = False
__lowercase : jnp.dtype = jnp.floataa
__lowercase : bool = True
__lowercase : int = 0
__lowercase : bool = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> FrozenDict:
'''simple docstring'''
__snake_case = (1, self.in_channels, self.sample_size, self.sample_size)
__snake_case = jnp.zeros(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__snake_case = jnp.ones((1,) , dtype=jnp.intaa )
__snake_case = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__snake_case , __snake_case = jax.random.split(__SCREAMING_SNAKE_CASE )
__snake_case = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )["params"]
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.block_out_channels
__snake_case = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__snake_case = self.num_attention_heads or self.attention_head_dim
# input
__snake_case = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__snake_case = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__snake_case = FlaxTimestepEmbedding(__SCREAMING_SNAKE_CASE , dtype=self.dtype )
__snake_case = self.only_cross_attention
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = (num_attention_heads,) * len(self.down_block_types )
# down
__snake_case = []
__snake_case = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__snake_case = output_channel
__snake_case = block_out_channels[i]
__snake_case = i == len(__SCREAMING_SNAKE_CASE ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__snake_case = FlaxCrossAttnDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__snake_case = FlaxDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__SCREAMING_SNAKE_CASE )
__snake_case = down_blocks
# mid
__snake_case = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__snake_case = []
__snake_case = list(reversed(__SCREAMING_SNAKE_CASE ) )
__snake_case = list(reversed(__SCREAMING_SNAKE_CASE ) )
__snake_case = list(reversed(__SCREAMING_SNAKE_CASE ) )
__snake_case = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__snake_case = output_channel
__snake_case = reversed_block_out_channels[i]
__snake_case = reversed_block_out_channels[min(i + 1 , len(__SCREAMING_SNAKE_CASE ) - 1 )]
__snake_case = i == len(__SCREAMING_SNAKE_CASE ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__snake_case = FlaxCrossAttnUpBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__snake_case = FlaxUpBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__SCREAMING_SNAKE_CASE )
__snake_case = output_channel
__snake_case = up_blocks
# out
__snake_case = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__snake_case = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ):
__snake_case = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0:
__snake_case = timesteps.astype(dtype=jnp.floataa )
__snake_case = jnp.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
__snake_case = self.time_proj(__SCREAMING_SNAKE_CASE )
__snake_case = self.time_embedding(__SCREAMING_SNAKE_CASE )
# 2. pre-process
__snake_case = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
__snake_case = self.conv_in(__SCREAMING_SNAKE_CASE )
# 3. down
__snake_case = (sample,)
for down_block in self.down_blocks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case , __snake_case = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
else:
__snake_case , __snake_case = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__snake_case = ()
for down_block_res_sample, down_block_additional_residual in zip(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__snake_case = new_down_block_res_samples
# 4. mid
__snake_case = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__snake_case = down_block_res_samples[-(self.layers_per_block + 1) :]
__snake_case = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = up_block(
__SCREAMING_SNAKE_CASE , temb=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , res_hidden_states_tuple=__SCREAMING_SNAKE_CASE , deterministic=not train , )
else:
__snake_case = up_block(__SCREAMING_SNAKE_CASE , temb=__SCREAMING_SNAKE_CASE , res_hidden_states_tuple=__SCREAMING_SNAKE_CASE , deterministic=not train )
# 6. post-process
__snake_case = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__snake_case = nn.silu(__SCREAMING_SNAKE_CASE )
__snake_case = self.conv_out(__SCREAMING_SNAKE_CASE )
__snake_case = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__SCREAMING_SNAKE_CASE )
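# Standalone sketch of the timestep handling at the top of `__call__` above:
# a Python scalar timestep is promoted to a one-element array before the
# sinusoidal projection and embedding are applied (a real jnp dtype is used
# here; the dump above spells dtypes with obfuscated aliases).
_t = 7
if not isinstance(_t, jnp.ndarray):
    _t = jnp.array([_t], dtype=jnp.int32)
assert _t.shape == (1,)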
| 24 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 1 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _UpperCamelCase (_lowerCamelCase : int = 3 )-> qiskit.result.counts.Counts:
'''simple docstring'''
    if isinstance(_lowerCamelCase , str ):
        raise TypeError('''number of qubits must be an integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(_lowerCamelCase ) != number_of_qubits:
        raise ValueError('''number of qubits must be an exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate (>10).''' )
__snake_case = QuantumRegister(_lowerCamelCase , '''qr''' )
__snake_case = ClassicalRegister(_lowerCamelCase , '''cr''' )
__snake_case = QuantumCircuit(_lowerCamelCase , _lowerCamelCase )
__snake_case = number_of_qubits
for i in range(_lowerCamelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_lowerCamelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _lowerCamelCase , _lowerCamelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_lowerCamelCase , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_lowerCamelCase , _lowerCamelCase )
# simulate with 10000 shots
__snake_case = Aer.get_backend('''qasm_simulator''' )
__snake_case = execute(_lowerCamelCase , _lowerCamelCase , shots=1_00_00 )
return job.result().get_counts(_lowerCamelCase )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
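# Sanity-check sketch (assumes qiskit/Aer as imported above): starting from
# |0...0>, the QFT produces a uniform superposition, so each of the 2**n basis
# states should appear in roughly 10000 / 2**n of the shots.
if __name__ == "__main__":
    _counts = quantum_fourier_transform(2)
    for _state in ("00", "01", "10", "11"):
        assert abs(_counts.get(_state, 0) - 2500) < 300  # loose statistical tolerance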
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 | 1 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase_ : Optional[Any] = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ : Any = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
UpperCAmelCase_ : Union[str, Any] = spec.loader.load_module()
UpperCAmelCase_ : str = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCAmelCase_ : Optional[Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCAmelCase_ : int = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = []
for config_class in list(CONFIG_MAPPING.values() ):
__snake_case = False
# source code of `config_class`
__snake_case = inspect.getsource(_lowerCamelCase )
__snake_case = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
__snake_case , __snake_case = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
__snake_case = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
__snake_case = True
break
__snake_case = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
__snake_case = '''\n'''.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
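    # Illustrative check (assumes the compiled pattern above is bound to
    # `_re_checkpoint`, as the call site inside the loop suggests): it extracts
    # (name, link) pairs from fragments like `[name](https://huggingface.co/name)`.
    _demo = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
    assert _re_checkpoint.findall(_demo) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]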
| 24 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('''RGB''' )
return image
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
__snake_case = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] )-> Tuple:
'''simple docstring'''
__snake_case = dct.pop(_lowerCamelCase )
__snake_case = val
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple )-> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__snake_case = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
__snake_case = qkv_bias
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Tuple )-> Dict:
'''simple docstring'''
__snake_case = 3_64 if '''coco''' in model_name else 2_24
__snake_case = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
__snake_case = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
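# Standalone sketch of the rename step performed by the helper above (written
# against plain dicts; the helper names in this dump are obfuscated): pop the
# old key and re-insert its value under the new key.
_sd = {"visual_encoder.cls_token": 1}
_sd["vision_model.embeddings.class_embedding"] = _sd.pop("visual_encoder.cls_token")
assert _sd == {"vision_model.embeddings.class_embedding": 1}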
| 24 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = CycleDiffusionPipeline
__lowercase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
__lowercase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
__lowercase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''})
__lowercase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowercase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__snake_case = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__snake_case = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
'''simple docstring'''
__snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = image / 2 + 0.5
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__snake_case = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = pipe(**__SCREAMING_SNAKE_CASE )
__snake_case = output.images
__snake_case = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__snake_case = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.get_dummy_components()
for name, module in components.items():
if hasattr(__SCREAMING_SNAKE_CASE , '''half''' ):
__snake_case = module.half()
__snake_case = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__snake_case = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = pipe(**__SCREAMING_SNAKE_CASE )
__snake_case = output.images
__snake_case = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__snake_case = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__snake_case = init_image.resize((512, 512) )
__snake_case = '''CompVis/stable-diffusion-v1-4'''
__snake_case = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder='''scheduler''' )
__snake_case = CycleDiffusionPipeline.from_pretrained(
__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
__snake_case = '''A black colored car'''
__snake_case = '''A blue colored car'''
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=__SCREAMING_SNAKE_CASE , source_prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
__snake_case = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__snake_case = init_image.resize((512, 512) )
__snake_case = '''CompVis/stable-diffusion-v1-4'''
__snake_case = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder='''scheduler''' )
__snake_case = CycleDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
__snake_case = '''A black colored car'''
__snake_case = '''A blue colored car'''
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=__SCREAMING_SNAKE_CASE , source_prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
__snake_case = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
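# Note on the value ranges used above (standalone sketch): `_preprocess_image`
# maps pixels from [0, 1] to [-1, 1] via `2.0 * image - 1.0`, and the pipeline's
# post-processing inverts this with `(image / 2 + 0.5).clamp(0, 1)`.
_x = torch.tensor([0.0, 0.5, 1.0])
_y = 2.0 * _x - 1.0  # forward normalization
assert torch.allclose((_y / 2 + 0.5).clamp(0, 1), _x)  # round trip recovers input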
| 24 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[Any] = '''beit'''
def __init__( self , __SCREAMING_SNAKE_CASE=8192 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-12 , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[3, 5, 7, 11] , __SCREAMING_SNAKE_CASE=[1, 2, 3, 6] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.4 , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=255 , **__SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = use_mask_token
__snake_case = use_absolute_position_embeddings
__snake_case = use_relative_position_bias
__snake_case = use_shared_relative_position_bias
__snake_case = layer_scale_init_value
__snake_case = drop_path_rate
__snake_case = use_mean_pooling
# decode head attributes (semantic segmentation)
__snake_case = out_indices
__snake_case = pool_scales
# auxiliary head attributes (semantic segmentation)
__snake_case = use_auxiliary_head
__snake_case = auxiliary_loss_weight
__snake_case = auxiliary_channels
__snake_case = auxiliary_num_convs
__snake_case = auxiliary_concat_input
__snake_case = semantic_loss_ignore_index
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
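# Hedged illustration (not from the file above): with the default image_size=224
# and patch_size=16, a BEiT-style encoder sees a 14x14 grid of patches plus one
# [CLS] token, so the transformer sequence length is 197:
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
sequence_length = num_patches + 1  # +1 for the [CLS] token
assert (num_patches, sequence_length) == (196, 197)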
| 24 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] )
]
return result
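# Hedged usage sketch for the pipeline above (the checkpoint name is an
# assumption and the scores are illustrative; any CLIP-style zero-shot
# checkpoint should behave the same way):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"],
#              hypothesis_template="This is a photo of {}.")
#   # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]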
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__snake_case = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__snake_case = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
__snake_case = tf_top_k_top_p_filtering(__SCREAMING_SNAKE_CASE , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__snake_case = output[output != -float('''inf''' )]
__snake_case = tf.cast(
tf.where(tf.not_equal(__SCREAMING_SNAKE_CASE , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1E-12 )
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
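# Hedged NumPy reference for the behaviour tested above (illustration only; the
# real tf_top_k_top_p_filtering also handles top_k, batching and
# min_tokens_to_keep): top-p keeps the smallest set of highest logits whose
# cumulative softmax mass exceeds top_p, including the token that crosses the
# threshold, and masks everything else to -inf.
import numpy as np

def top_p_mask(logits: np.ndarray, top_p: float) -> np.ndarray:
    order = np.argsort(logits)[::-1]  # indices from highest to lowest logit
    shifted = logits[order] - logits[order].max()  # for numerical stability
    probs = np.exp(shifted) / np.exp(shifted).sum()
    remove = np.cumsum(probs) > top_p
    remove[1:] = remove[:-1].copy()  # shift right so the crossing token is kept
    remove[0] = False
    out = np.full(logits.shape, -np.inf)
    out[order[~remove]] = logits[order[~remove]]
    return out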
@require_tf
class lowerCAmelCase ( unittest.TestCase , __lowerCAmelCase):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__lowercase : Tuple = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__snake_case = 2
__snake_case = 2
class lowerCAmelCase ( tf.Module):
def __init__( self , __SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
super(__SCREAMING_SNAKE_CASE , self ).__init__()
__snake_case = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = self.model.generate(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE , return_dict_in_generate=__SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
__snake_case = [[2, 0], [102, 103]]
__snake_case = [[1, 0], [1, 1]]
__snake_case = DummyModel(model=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , signatures={'''serving_default''': dummy_model.serving} )
__snake_case = tf.saved_model.load(__SCREAMING_SNAKE_CASE ).signatures['''serving_default''']
for batch_size in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 ):
__snake_case = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
__snake_case = serving_func(**__SCREAMING_SNAKE_CASE )['''sequences''']
__snake_case = test_model.generate(**__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__snake_case = 1
__snake_case = 2
class lowerCAmelCase ( tf.Module):
def __init__( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super(__SCREAMING_SNAKE_CASE , self ).__init__()
__snake_case = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__snake_case = self.model.generate(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE , return_dict_in_generate=__SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
__snake_case = [[2], [102, 103]]
__snake_case = [[1], [1, 1]]
__snake_case = DummyModel(model=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , signatures={'''serving_default''': dummy_model.serving} )
__snake_case = tf.saved_model.load(__SCREAMING_SNAKE_CASE ).signatures['''serving_default''']
for input_row in range(len(__SCREAMING_SNAKE_CASE ) ):
__snake_case = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
__snake_case = serving_func(**__SCREAMING_SNAKE_CASE )['''sequences''']
__snake_case = test_model.generate(**__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
@require_tensorflow_text
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=__SCREAMING_SNAKE_CASE )
class lowerCAmelCase ( tf.keras.layers.Layer):
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
__snake_case = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__SCREAMING_SNAKE_CASE , '''spiece.model''' ) , '''rb''' ).read() )
__snake_case = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = self.tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case , __snake_case = text.pad_model_inputs(
__SCREAMING_SNAKE_CASE , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__snake_case = self.model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
return self.tokenizer.detokenize(__SCREAMING_SNAKE_CASE )
__snake_case = CompleteSentenceTransformer()
__snake_case = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
__snake_case = complete_model(__SCREAMING_SNAKE_CASE )
__snake_case = tf.keras.Model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
keras_model.save(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
__snake_case = 14
__snake_case = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__snake_case = '''Hello, my dog is cute and'''
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
__snake_case = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__snake_case = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
__snake_case = model.generate(**__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__snake_case = [638, 198]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
__snake_case = model.generate(**__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
__snake_case = '''Hugging Face is a technology company based in New York and Paris.'''
__snake_case = bart_tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ).input_ids
__snake_case = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
__snake_case = bart_model.generate(__SCREAMING_SNAKE_CASE ).numpy()
class lowerCAmelCase ( __lowerCAmelCase):
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().call(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
__snake_case = bart_model.generate(__SCREAMING_SNAKE_CASE , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
class lowerCAmelCase ( bart_model.model.encoder.__class__):
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return super().call(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = FakeEncoder(bart_model.config , bart_model.model.shared )
__snake_case = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__snake_case = bart_model.generate(__SCREAMING_SNAKE_CASE ).numpy()
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__SCREAMING_SNAKE_CASE , foo='''bar''' )
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
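# Hedged sketch of the lazy-import pattern used above (names are illustrative,
# not the actual transformers implementation): attribute access triggers the
# real submodule import on first use, so importing the package stays cheap.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)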
| 24 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ : int = '''docs/source/en/_toctree.yml'''
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = defaultdict(_lowerCamelCase )
__snake_case = []
__snake_case = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(_lowerCamelCase )
__snake_case = new_doc_list
__snake_case = [key for key, value in counts.items() if value > 1]
__snake_case = []
for duplicate_key in duplicates:
__snake_case = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
raise ValueError('''The doc list has two \'overview\' docs, which is not allowed.''' )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
def _UpperCamelCase (_lowerCamelCase : Tuple=False )-> Any:
'''simple docstring'''
with open(_lowerCamelCase , encoding='''utf-8''' ) as f:
__snake_case = yaml.safe_load(f.read() )
# Get to the API doc
__snake_case = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case = content[api_idx]['''sections''']
# Then to the scheduler doc
__snake_case = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__snake_case = api_doc[scheduler_idx]['''sections''']
__snake_case = clean_doc_toc(_lowerCamelCase )
__snake_case = False
if new_scheduler_doc != scheduler_doc:
__snake_case = True
if overwrite:
__snake_case = new_scheduler_doc
if diff:
if overwrite:
__snake_case = api_doc
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
'''The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def _UpperCamelCase (_lowerCamelCase : int=False )-> Optional[int]:
'''simple docstring'''
with open(_lowerCamelCase , encoding='''utf-8''' ) as f:
__snake_case = yaml.safe_load(f.read() )
# Get to the API doc
__snake_case = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case = content[api_idx]['''sections''']
# Then to the pipeline doc
__snake_case = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__snake_case = False
__snake_case = api_doc[pipeline_idx]['''sections''']
__snake_case = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__snake_case = pipeline_doc['''section''']
__snake_case = clean_doc_toc(_lowerCamelCase )
if overwrite:
__snake_case = new_sub_pipeline_doc
new_pipeline_docs.append(_lowerCamelCase )
# sort overall pipeline doc
__snake_case = clean_doc_toc(_lowerCamelCase )
if new_pipeline_docs != pipeline_docs:
__snake_case = True
if overwrite:
__snake_case = new_pipeline_docs
if diff:
if overwrite:
__snake_case = api_doc
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
'''The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCAmelCase_ : Dict = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
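# Hedged illustration of clean_doc_toc's intent, inferred from the code above:
#
#   [{"local": "overview", "title": "Overview"},
#    {"local": "zebra", "title": "Zebra"},
#    {"local": "ddim", "title": "DDIM"},
#    {"local": "ddim", "title": "DDIM"}]      # duplicate local, same title
#
# becomes (overview pinned first, duplicates merged, the rest sorted by title):
#
#   [{"local": "overview", "title": "Overview"},
#    {"local": "ddim", "title": "DDIM"},
#    {"local": "zebra", "title": "Zebra"}]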
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
__snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) )
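# Hedged sanity check (illustration only; assumes the three functions above are
# bound to the names the benchmark loop below uses): all variants agree, even
# on negative input thanks to abs().
for n in (0, 7, -7, 26_21_44):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)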
def _UpperCamelCase ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase : Callable , _lowerCamelCase : int ) -> None:
__snake_case = f'''{func.__name__}({value})'''
__snake_case = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
print(f'''{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 1 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int )-> int:
'''simple docstring'''
while b:
__snake_case , __snake_case = b, a % b
return a
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int )-> int:
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(_lowerCamelCase , a % b )
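# Hedged cross-check (illustration only; assumes the two functions above are
# bound to the names used in main() below): both follow gcd(a, b) = gcd(b, a % b).
import math
for a, b in ((3, 5), (6, 3), (48, 18)):
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)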
def _UpperCamelCase ()-> Optional[Any]:
'''simple docstring'''
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
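# Hedged worked example for the two conversions above (illustration only):
#   infix   : a+b*c
#   postfix : abc*+          (infix_2_postfix)
#   prefix  : +a*bc          (infix_2_prefix)
# The prefix routine reverses the infix string, swaps "(" and ")", converts the
# result to postfix, then reverses that postfix to obtain the prefix form.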
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 24 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCAmelCase_ : Union[str, Any] = '''\
'''
UpperCAmelCase_ : int = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
UpperCAmelCase_ : str = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase ( datasets.Metric):
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
__snake_case = '''cuda'''
else:
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__snake_case = model.to(__SCREAMING_SNAKE_CASE )
__snake_case = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__snake_case = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__snake_case = model.config.max_length - 1
else:
__snake_case = model.config.max_length
__snake_case = tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , return_attention_mask=__SCREAMING_SNAKE_CASE , ).to(__SCREAMING_SNAKE_CASE )
__snake_case = encodings['''input_ids''']
__snake_case = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__snake_case = []
__snake_case = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ):
__snake_case = min(start_index + batch_size , len(__SCREAMING_SNAKE_CASE ) )
__snake_case = encoded_texts[start_index:end_index]
__snake_case = attn_masks[start_index:end_index]
if add_start_token:
__snake_case = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__snake_case = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask] , dim=1 )
__snake_case = encoded_batch
with torch.no_grad():
__snake_case = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).logits
__snake_case = out_logits[..., :-1, :].contiguous()
__snake_case = labels[..., 1:].contiguous()
__snake_case = attn_mask[..., 1:].contiguous()
__snake_case = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , __SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 24 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
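# Hedged illustration of the hidden_size computed above: with embed_dim=96 and
# depths of length 4, the channel width doubles per stage, so the final stage
# width (and therefore hidden_size) is 96 * 2 ** 3 = 768:
embed_dim, num_stages = 96, 4
stage_widths = [embed_dim * 2**i for i in range(num_stages)]
assert stage_widths == [96, 192, 384, 768]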
| 24 | 1 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
UpperCAmelCase_ : Dict = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
UpperCAmelCase_ : Union[str, Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
UpperCAmelCase_ : List[Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase ( datasets.Metric):
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE="binary" , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = fa_score(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , pos_label=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE )
return {"f1": float(__SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
| 24 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
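# Hedged examples of the formatting above (illustration only):
#   format_time(3661) -> '1:01:01'
#   format_time(61)   -> '01:01'
#   format_time(5)    -> '00:05'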
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 1 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : dict )-> str:
'''simple docstring'''
__snake_case = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , '''html.parser''' )
__snake_case = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
__snake_case = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
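# Hedged note: this relies on Google Scholar's current markup -- "div.gs_ri"
# wraps one result and the third anchor inside "div.gs_fl" is the "Cited by N"
# link -- so the function returns a string such as "Cited by 123".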
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 3_0,
'''pages''': '''3979-3990''',
'''year''': 2_0_1_8,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
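# Hedged worked example: for divisor 7 the successive repunits mod 7 are
# 1, 4, 6, 5, 2, 0, so least_divisible_repunit(7) == 6 -- indeed
# R(6) = 111111 = 7 * 15873.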
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
UpperCAmelCase_ : Optional[Any] = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def _UpperCamelCase (_lowerCamelCase : List[Any] )-> Union[str, Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
__snake_case = [trans(img.convert('''RGB''' ) ) for img in image]
__snake_case = torch.stack(_lowerCamelCase )
return image
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
__snake_case = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = min(int(num_inference_steps * strength ) , __SCREAMING_SNAKE_CASE )
__snake_case = max(num_inference_steps - init_timestep , 0 )
__snake_case = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__SCREAMING_SNAKE_CASE )}''' )
__snake_case = image.to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = init_latents.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
# get latents
print('''add noise to latents at timestep''' , __SCREAMING_SNAKE_CASE )
__snake_case = self.scheduler.add_noise(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = init_latents
return latents
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0.8 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(__SCREAMING_SNAKE_CASE )
# 2. Preprocess image
__snake_case = preprocess(__SCREAMING_SNAKE_CASE )
# 3. set timesteps
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device )
__snake_case , __snake_case = self.get_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = timesteps[:1].repeat(__SCREAMING_SNAKE_CASE )
# 4. Prepare latent variables
__snake_case = self.prepare_latents(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.unet.dtype , self.device , __SCREAMING_SNAKE_CASE )
__snake_case = latents
# 5. Denoising loop
for t in self.progress_bar(__SCREAMING_SNAKE_CASE ):
# 1. predict noise model_output
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__snake_case = self.scheduler.step(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , use_clipped_model_output=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , ).prev_sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
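# e.g. with a 10-row dataframe split into two partitions, partition_order
# [1, 0] yields ids '1_0'..'1_4' followed by '0_0'..'0_4' (a sketch; which
# rows land in which partition is up to Spark's repartitioning).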
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 1 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : list[int] , _lowerCamelCase : int )-> bool:
'''simple docstring'''
__snake_case = len(_lowerCamelCase )
__snake_case = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# a sum of zero (0) can always be formed by taking no elements, hence True/1
for i in range(arr_len + 1 ):
__snake_case = True
# a non-zero sum cannot be formed from an empty set, hence False
for i in range(1 , required_sum + 1 ):
__snake_case = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__snake_case = subset[i - 1][j]
if arr[i - 1] <= j:
__snake_case = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
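# A self-contained usage sketch of the DP above (illustrative names):
# subset[i][j] is True iff some subset of the first i values sums to j.
def _is_sum_subset_demo(arr: list[int], required_sum: int) -> bool:
    subset = [[False] * (required_sum + 1) for _ in range(len(arr) + 1)]
    for i in range(len(arr) + 1):
        subset[i][0] = True  # the empty subset always sums to 0
    for i in range(1, len(arr) + 1):
        for j in range(1, required_sum + 1):
            subset[i][j] = subset[i - 1][j] or (
                arr[i - 1] <= j and subset[i - 1][j - arr[i - 1]]
            )
    return subset[len(arr)][required_sum]


assert _is_sum_subset_demo([3, 34, 4, 12, 5, 2], 9)       # 4 + 5 == 9
assert not _is_sum_subset_demo([3, 34, 4, 12, 5, 2], 30)  # no subset sums to 30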
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> float:
'''simple docstring'''
__snake_case = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
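# Worked instance of the closed form above: for first_term = 1, common_diff = 1
# and num_of_terms = 10, S = 10/2 * (2 * 1 + 9 * 1) = 55.0, which is what the
# demo below prints.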
def _UpperCamelCase ()-> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 24 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=False )-> Union[str, Any]:
'''simple docstring'''
try:
__snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case = strtobool(_lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCAmelCase_ : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('''test requires faiss''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[str]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('''test requires regex''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__snake_case = unittest.skip('''test requires PyTorch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
if not config.TF_AVAILABLE:
__snake_case = unittest.skip('''test requires TensorFlow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
__snake_case = unittest.skip('''test requires JAX''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
if not config.PIL_AVAILABLE:
__snake_case = unittest.skip('''test requires Pillow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Tuple:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> str:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Dict:
'''simple docstring'''
def _require_spacy_model(_lowerCamelCase : int ):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowerCamelCase ) )(_lowerCamelCase )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase (_lowerCamelCase : str )-> Dict:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case = unittest.skip('''test is slow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> Optional[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__snake_case = unittest.skip('''test is local''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : str )-> int:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case = unittest.skip('''test is packaged''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case = unittest.skip('''test requires remote''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (*_lowerCamelCase : str )-> Optional[int]:
'''simple docstring'''
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
__snake_case = decorator(_lowerCamelCase )
setattr(cls , _lowerCamelCase , _lowerCamelCase )
return cls
return decorate
class lowerCAmelCase ( __lowerCAmelCase):
pass
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : List[str] = 0
__lowercase : Dict = 1
__lowercase : List[Any] = 2
@contextmanager
def _UpperCamelCase (_lowerCamelCase : Dict=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : Optional[int]=1E-16 )-> Tuple:
'''simple docstring'''
__snake_case = requests.Session().request
def timeout_request(_lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : str , **_lowerCamelCase : Any ):
# Change the url to an invalid url so that the connection hangs
__snake_case = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
__snake_case = timeout
try:
return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case = url
__snake_case = e.args[0]
__snake_case = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
__snake_case = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , **_lowerCamelCase : Dict ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=_lowerCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired by https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowerCamelCase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def _UpperCamelCase (*_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str] )-> Any:
'''simple docstring'''
__snake_case = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase ) as tmp_dir:
try:
os.chdir(_lowerCamelCase )
yield
finally:
os.chdir(_lowerCamelCase )
@contextmanager
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int )-> Any:
'''simple docstring'''
return deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : int , *_lowerCamelCase : int , **_lowerCamelCase : Optional[int] ):
try:
return func(*_lowerCamelCase , **_lowerCamelCase )
except HTTPError as err:
if str(_lowerCamelCase ).startswith('''500''' ) or str(_lowerCamelCase ).startswith('''502''' ):
pytest.xfail(str(_lowerCamelCase ) )
raise err
return decorator.decorator(_wrapper , _lowerCamelCase )
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = returncode
__snake_case = stdout
__snake_case = stderr
async def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] )-> Dict:
'''simple docstring'''
while True:
__snake_case = await stream.readline()
if line:
callback(_lowerCamelCase )
else:
break
async def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Dict=False , _lowerCamelCase : List[Any]=False )-> _RunOutput:
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(_lowerCamelCase ) )
__snake_case = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, we will need to switch to the following code. The problem is that no data
# will be seen until the subprocess is done, and if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case = []
__snake_case = []
def tee(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Dict="" ):
__snake_case = line.decode('''utf-8''' ).rstrip()
sink.append(_lowerCamelCase )
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label='''stderr:''' ) ),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[Any]=1_80 , _lowerCamelCase : Dict=False , _lowerCamelCase : int=True )-> _RunOutput:
'''simple docstring'''
__snake_case = asyncio.get_event_loop()
__snake_case = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase ) )
__snake_case = ''' '''.join(_lowerCamelCase )
if result.returncode > 0:
__snake_case = '''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def _UpperCamelCase ()-> Dict:
'''simple docstring'''
__snake_case = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
__snake_case = re.sub(R'''^gw''' , '''''' , _lowerCamelCase , 0 , re.M )
return int(_lowerCamelCase )
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = 2_95_00
__snake_case = pytest_xdist_worker_id()
return port + uniq_delta
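# e.g. with PYTEST_XDIST_WORKER == 'gw2' the worker id above is 2, so the
# returned port is 29500 + 2 == 29502, giving each xdist worker a distinct,
# deterministic port.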
| 24 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = '''▁'''
UpperCAmelCase_ : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
UpperCAmelCase_ : Tuple = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
UpperCAmelCase_ : str = {
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
UpperCAmelCase_ : Any = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : int = VOCAB_FILES_NAMES
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : int = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = ['''input_ids''', '''attention_mask''']
__lowercase : List[int] = []
__lowercase : List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
__snake_case = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
__snake_case = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__snake_case = 1
__snake_case = len(self.sp_model )
__snake_case = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
__snake_case = {v: k for k, v in self.lang_code_to_id.items()}
__snake_case = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__snake_case = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__snake_case = src_lang if src_lang is not None else '''en_XX'''
__snake_case = self.lang_code_to_id[self._src_lang]
__snake_case = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> int:
'''simple docstring'''
__snake_case = self.__dict__.copy()
__snake_case = None
__snake_case = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__snake_case = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
__snake_case = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = [1] * len(self.prefix_tokens )
__snake_case = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case = src_lang
__snake_case = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
__snake_case = tgt_lang_id
return inputs
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
'''simple docstring'''
__snake_case = src_lang
__snake_case = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
__snake_case = self.lang_code_to_id[src_lang]
__snake_case = []
__snake_case = [self.eos_token_id, self.cur_lang_code]
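# e.g. after switching to source language 'en_XX', encoded sequences are laid
# out as [tokens..., </s>, en_XX]: MBart appends the language code as a
# suffix and leaves prefix_tokens empty.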
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
__snake_case = self.lang_code_to_id[lang]
__snake_case = []
__snake_case = [self.eos_token_id, self.cur_lang_code]
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = [[0 for _ in range(_lowerCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__snake_case = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
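# Sanity check of the recurrence above (a small worked case): for m = 5 the
# table ends with memo[5][4] == 7, matching the seven integer partitions of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.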
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ : List[str] = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase_ : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : List[Any] = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
'''simple docstring'''
import argparse
import os
import re
UpperCAmelCase_ : List[str] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase_ : Tuple = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCAmelCase_ : Dict = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : bool = False )-> str:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__snake_case = f.read()
__snake_case = content.split('''\n''' )
__snake_case = []
__snake_case = 0
while line_idx < len(_lowerCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__snake_case = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__snake_case = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__snake_case = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : _re_identifier.search(_lowerCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(_lowerCamelCase ) )
elif "\n".join(_lowerCamelCase ) != content:
return True
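# e.g. a block such as
#   MODEL_MAPPING_NAMES = OrderedDict(
#       [
#           ("bert", "BertModel"),
#           ("albert", "AlbertModel"),
#       ]
#   )
# is rewritten with its entries reordered by the first quoted identifier, so
# "albert" ends up before "bert" (a sketch of the sort's effect, using
# hypothetical entries).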
def _UpperCamelCase (_lowerCamelCase : bool = False )-> Tuple:
'''simple docstring'''
__snake_case = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for f in os.listdir(_lowerCamelCase ) if f.endswith('''.py''' )]
__snake_case = [sort_auto_mapping(_lowerCamelCase , overwrite=_lowerCamelCase ) for fname in fnames]
if not overwrite and any(_lowerCamelCase ):
__snake_case = [f for f, d in zip(_lowerCamelCase , _lowerCamelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_lowerCamelCase )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 24 | 1 |
'''simple docstring'''
UpperCAmelCase_ : List[Any] = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
1_0: '''a''',
1_1: '''b''',
1_2: '''c''',
1_3: '''d''',
1_4: '''e''',
1_5: '''f''',
}
def _UpperCamelCase (_lowerCamelCase : float )-> str:
'''simple docstring'''
assert type(_lowerCamelCase ) in (int, float) and decimal == int(_lowerCamelCase )
__snake_case = int(_lowerCamelCase )
__snake_case = ''''''
__snake_case = False
if decimal < 0:
__snake_case = True
decimal *= -1
while decimal > 0:
__snake_case , __snake_case = divmod(_lowerCamelCase , 16 )
__snake_case = values[remainder] + hexadecimal
__snake_case = '''0x''' + hexadecimal
if negative:
__snake_case = '''-''' + hexadecimal
return hexadecimal
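# Cross-check of the repeated divmod(decimal, 16) loop above against the
# builtin: 255 -> '0xff' and -42 -> '-0x2a'.
assert hex(255) == "0xff" and hex(-42) == "-0x2a"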
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase (*_lowerCamelCase : str , _lowerCamelCase : Optional[Union[Dict, Any]] = None , _lowerCamelCase : List[Any]=True , _lowerCamelCase : str=2 )-> str:
'''simple docstring'''
from .. import __version__
__snake_case = take_from
__snake_case = ()
if not isinstance(args[0] , _lowerCamelCase ):
__snake_case = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
__snake_case = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
__snake_case = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
__snake_case = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
__snake_case = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case = call_frame.filename
__snake_case = call_frame.lineno
__snake_case = call_frame.function
__snake_case , __snake_case = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
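# A hypothetical usage sketch (diffusers exposes this helper as `deprecate`;
# the argument names below are illustrative, not from this file):
#
#   value = deprecate("old_kwarg", "0.20.0", "Use `new_kwarg` instead.",
#                     take_from=kwargs)
#
# This pops `old_kwarg` from `kwargs`, emits the deprecation warning, and
# returns the popped value; once the installed version reaches 0.20.0 the
# call itself raises, forcing the stale deprecation to be removed.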
| 24 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Dict = DiTPipeline
__lowercase : List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__lowercase : Optional[int] = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
__lowercase : str = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__lowercase : Any = False
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__SCREAMING_SNAKE_CASE , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=__SCREAMING_SNAKE_CASE , )
__snake_case = AutoencoderKL()
__snake_case = DDIMScheduler()
__snake_case = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = '''cpu'''
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = pipe(**__SCREAMING_SNAKE_CASE ).images
__snake_case = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__snake_case = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
__snake_case = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = torch.manual_seed(0 )
__snake_case = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
__snake_case = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
__snake_case = pipe.get_label_ids(__SCREAMING_SNAKE_CASE )
__snake_case = pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
__snake_case = ['''vase''', '''umbrella''']
__snake_case = pipe.get_label_ids(__SCREAMING_SNAKE_CASE )
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 24 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
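# Example mappings obtained by tracing the branches above (illustrative, not
# read from a real checkpoint): 'patch_embed.0.weight' becomes
# 'efficientformer.patch_embed.convolution1.weight', and 'norm.weight'
# becomes 'efficientformer.layernorm.weight'.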
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__snake_case = config.depths[-1] - config.num_metaad_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__lowercase : str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True})
__lowercase : ClassVar[Features] = Features({'''question''': Value('''string'''), '''context''': Value('''string''')})
__lowercase : ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string'''),
'''answer_start''': Value('''int32'''),
})
})
__lowercase : str = "question"
__lowercase : str = "context"
__lowercase : str = "answers"
@property
def lowerCAmelCase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
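# A minimal usage sketch (hypothetical dataset choice; `prepare_for_task` is the
# `datasets` API that consumes task templates such as this one):
#   squad = load_dataset("squad", split="train")
#   squad = squad.prepare_for_task("question-answering-extractive")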
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ) -> Tuple:
'''simple docstring'''
__snake_case = size if size is not None else {'''shortest_edge''': 20}
__snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_flip_channel_order''' ) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : List[Any] = KandinskyImgaImgPipeline
__lowercase : int = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
__lowercase : List[Any] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
__lowercase : Optional[int] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__lowercase : List[Any] = False
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return 100
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__snake_case = MultilingualCLIP(__SCREAMING_SNAKE_CASE )
__snake_case = text_encoder.eval()
return text_encoder
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = {
'''in_channels''': 4,
# Out channels are double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__snake_case = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_unet
__snake_case = self.dummy_movq
__snake_case = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__snake_case = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
__snake_case = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> List[str]:
'''simple docstring'''
__snake_case = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__SCREAMING_SNAKE_CASE )
# create init_image
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((256, 256) )
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = '''cpu'''
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__snake_case = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
__snake_case = output.images
__snake_case = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
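# compare a 3x3 corner slice of both outputs against the reference values below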
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__snake_case = '''A red cartoon frog, 4k'''
__snake_case = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
__snake_case = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
__snake_case = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case , __snake_case = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__snake_case = pipeline(
__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
__snake_case = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 24 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "arrow" , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = load_from_cache_file
__snake_case = file_format
__snake_case = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
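# non-streaming: download and prepare the dataset on disk, optionally bypassing the cache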
__snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
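# A minimal usage sketch (hypothetical paths; assumes the original, unmangled API in
# which this class is exported as `SparkDatasetReader` with a public `read()` method):
#   ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache", file_format="arrow").read()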
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Tuple = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = LEDTokenizer
__lowercase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**__SCREAMING_SNAKE_CASE )
__snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
__snake_case = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__snake_case = value
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
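# single sequence: <s> X </s>; pair of sequences: <s> A </s></s> B </s>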
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''simple docstring'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 24 | 1 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : list[int] )-> list[list[int]]:
'''simple docstring'''
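# rotate through the list: fix each element as the last position and permute the remainder recursively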
__snake_case = []
if len(_lowerCamelCase ) == 1:
return [nums.copy()]
for _ in range(len(_lowerCamelCase ) ):
__snake_case = nums.pop(0 )
__snake_case = permute(_lowerCamelCase )
for perm in permutations:
perm.append(_lowerCamelCase )
result.extend(_lowerCamelCase )
nums.append(_lowerCamelCase )
return result
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> Optional[Any]:
'''simple docstring'''
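# in-place swap-based backtracking: fix one position at a time, recurse, then undo the swap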
def backtrack(_lowerCamelCase : str ):
if start == len(_lowerCamelCase ) - 1:
output.append(nums[:] )
else:
for i in range(_lowerCamelCase , len(_lowerCamelCase ) ):
__snake_case , __snake_case = nums[i], nums[start]
backtrack(start + 1 )
__snake_case , __snake_case = nums[i], nums[start] # backtrack
__snake_case = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the result of the permutea function
UpperCAmelCase_ : Union[str, Any] = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 24 |
'''simple docstring'''
from collections import deque
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
__snake_case = len(_lowerCamelCase )
__snake_case = deque()
__snake_case = [False for _ in range(_lowerCamelCase )]
__snake_case = [-1 for _ in range(_lowerCamelCase )]
__snake_case = index_of[:]
def strong_connect(_lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ):
__snake_case = index # the number when this node is seen
__snake_case = index # lowest rank node reachable from here
index += 1
stack.append(_lowerCamelCase )
__snake_case = True
for w in g[v]:
if index_of[w] == -1:
__snake_case = strong_connect(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
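# v is the root of a strongly connected component: pop the stack down to v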
if lowlink_of[v] == index_of[v]:
__snake_case = []
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
while w != v:
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
components.append(_lowerCamelCase )
return index
__snake_case = []
for v in range(_lowerCamelCase ):
if index_of[v] == -1:
strong_connect(_lowerCamelCase , 0 , _lowerCamelCase )
return components
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = [[] for _ in range(_lowerCamelCase )]
for u, v in edges:
g[u].append(_lowerCamelCase )
return g
if __name__ == "__main__":
# Test
UpperCAmelCase_ : List[str] = 7
UpperCAmelCase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ : List[str] = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ : Tuple = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 24 | 1 |
'''simple docstring'''
from collections import deque
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
__snake_case = len(_lowerCamelCase )
__snake_case = deque()
__snake_case = [False for _ in range(_lowerCamelCase )]
__snake_case = [-1 for _ in range(_lowerCamelCase )]
__snake_case = index_of[:]
def strong_connect(_lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ):
__snake_case = index # the number when this node is seen
__snake_case = index # lowest rank node reachable from here
index += 1
stack.append(_lowerCamelCase )
__snake_case = True
for w in g[v]:
if index_of[w] == -1:
__snake_case = strong_connect(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
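# v is the root of a strongly connected component: pop the stack down to v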
if lowlink_of[v] == index_of[v]:
__snake_case = []
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
while w != v:
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
components.append(_lowerCamelCase )
return index
__snake_case = []
for v in range(_lowerCamelCase ):
if index_of[v] == -1:
strong_connect(_lowerCamelCase , 0 , _lowerCamelCase )
return components
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = [[] for _ in range(_lowerCamelCase )]
for u, v in edges:
g[u].append(_lowerCamelCase )
return g
if __name__ == "__main__":
# Test
UpperCAmelCase_ : List[str] = 7
UpperCAmelCase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ : List[str] = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ : Tuple = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 24 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
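# fmt: off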
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : int = '''wavlm'''
def __init__( self , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE="group" , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) , __SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) , __SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=320 , __SCREAMING_SNAKE_CASE=800 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.05 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=320 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="mean" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1500) , __SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) , __SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
__snake_case = hidden_size
__snake_case = feat_extract_norm
__snake_case = feat_extract_activation
__snake_case = list(__SCREAMING_SNAKE_CASE )
__snake_case = list(__SCREAMING_SNAKE_CASE )
__snake_case = list(__SCREAMING_SNAKE_CASE )
__snake_case = conv_bias
__snake_case = num_buckets
__snake_case = max_bucket_distance
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layerdrop
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = num_ctc_classes
__snake_case = vocab_size
__snake_case = do_stable_layer_norm
__snake_case = use_weighted_layer_sum
__snake_case = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = apply_spec_augment
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
# parameters for pretraining with codevector quantized representations
__snake_case = num_codevectors_per_group
__snake_case = num_codevector_groups
__snake_case = contrastive_logits_temperature
__snake_case = num_negatives
__snake_case = codevector_dim
__snake_case = proj_codevector_dim
__snake_case = diversity_loss_weight
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# adapter
__snake_case = add_adapter
__snake_case = adapter_kernel_size
__snake_case = adapter_stride
__snake_case = num_adapter_layers
__snake_case = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case = list(__SCREAMING_SNAKE_CASE )
__snake_case = list(__SCREAMING_SNAKE_CASE )
__snake_case = list(__SCREAMING_SNAKE_CASE )
__snake_case = xvector_output_dim
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
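# the overall downsampling factor of the feature encoder: the product of all conv strides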
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _UpperCamelCase (_lowerCamelCase : list[list[float]] )-> list[list[float]]:
'''simple docstring'''
__snake_case = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_lowerCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
__snake_case = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
__snake_case = [[0.0, 0.0], [0.0, 0.0]]
__snake_case , __snake_case = matrix[1][1], matrix[0][0]
__snake_case , __snake_case = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_lowerCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_lowerCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__snake_case = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
__snake_case = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
__snake_case = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
__snake_case = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
__snake_case = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
__snake_case = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
__snake_case = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
__snake_case = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
__snake_case = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
__snake_case = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
__snake_case = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
__snake_case = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
__snake_case = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__snake_case = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_lowerCamelCase )
# Calculate the inverse of the matrix
return [[float(d(_lowerCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
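# A minimal usage sketch (assumes the original, unmangled name `inverse_of_matrix`):
#   inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]])  # -> [[0.6, -0.7], [-0.2, 0.4]]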
| 24 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('''RGB''' )
return image
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
__snake_case = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] )-> Tuple:
'''simple docstring'''
__snake_case = dct.pop(_lowerCamelCase )
__snake_case = val
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple )-> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__snake_case = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
__snake_case = qkv_bias
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Tuple )-> Dict:
'''simple docstring'''
__snake_case = 3_64 if '''coco''' in model_name else 2_24
__snake_case = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
__snake_case = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
        state_dict[key] = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the BLIP-2 model to convert (one of the choices above).''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCAmelCase_ : List[str] = logging.getLogger(__name__)
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
        '''--dataset_name''' , type=_lowerCamelCase , default='''wikitext''' , help='''Name of the training dataset. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=_lowerCamelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=_lowerCamelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=_lowerCamelCase , default=10_00 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=_lowerCamelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=_lowerCamelCase , default=5_12 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=_lowerCamelCase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
__snake_case = parser.parse_args()
return args
def _UpperCamelCase (_lowerCamelCase : int )-> Any:
'''simple docstring'''
def fn(_lowerCamelCase : List[str] ):
return tokenizer(examples['''text'''] )
return fn
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
__snake_case = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
__snake_case = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
__snake_case = tf.train.Features(feature=_lowerCamelCase )
__snake_case = tf.train.Example(features=_lowerCamelCase )
__snake_case = example.SerializeToString()
records.append(_lowerCamelCase )
return records
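# Hedged sketch (not part of the original script): one way a shard written by
# this script could be read back. tf.io.VarLenFeature matches the
# variable-length int64 lists serialized above; the feature names mirror the
# serialization dict.
def _demo_parse_record(serialized_example):
    feature_spec = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }
    # Returns a dict mapping each feature name to a tf.SparseTensor.
    return tf.io.parse_single_example(serialized_example, feature_spec)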
def _UpperCamelCase (_lowerCamelCase : str )-> Any:
'''simple docstring'''
__snake_case = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__snake_case = min(len(_lowerCamelCase ) , args.limit )
__snake_case = dataset.select(range(_lowerCamelCase ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
__snake_case = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__snake_case = os.path.join(args.output_dir , args.split )
if not os.path.exists(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
else:
__snake_case = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__snake_case = tokenize_function(_lowerCamelCase )
__snake_case = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_lowerCamelCase : Optional[int] ):
# Concatenate all texts.
__snake_case = {k: sum(examples[k] , [] ) for k in examples.keys()}
__snake_case = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__snake_case = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__snake_case = {
k: [t[i : i + args.max_length] for i in range(0 , _lowerCamelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
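    # Worked sketch (not part of the original script): with a hypothetical
    # max_length of 4, ten concatenated tokens yield two full chunks and the
    # two-token remainder is dropped, mirroring group_texts above.
    def _demo_group_texts_chunking() -> None:
        max_length = 4
        examples = {"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8, 9, 10]]}
        concatenated = {k: sum(examples[k], []) for k in examples}
        total_length = (len(concatenated["input_ids"]) // max_length) * max_length
        chunks = {
            k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
            for k, t in concatenated.items()
        }
        assert chunks["input_ids"] == [[1, 2, 3, 4], [5, 6, 7, 8]]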
__snake_case = dataset_tokenized.map(_lowerCamelCase , batched=_lowerCamelCase , batch_size=10_00 , num_proc=4 )
__snake_case = 0
__snake_case = 0
for shard in range(0 , len(_lowerCamelCase ) , args.shard_size ):
__snake_case = grouped_dataset[shard : shard + args.shard_size]
__snake_case = len(dataset_snapshot['''input_ids'''] )
__snake_case = os.path.join(_lowerCamelCase , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
__snake_case = get_serialized_examples(_lowerCamelCase )
with tf.io.TFRecordWriter(_lowerCamelCase ) as out_file:
for i in range(len(_lowerCamelCase ) ):
__snake_case = serialized_examples[i]
out_file.write(_lowerCamelCase )
print('''Wrote file {} containing {} records'''.format(_lowerCamelCase , _lowerCamelCase ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , '''w''' ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = parse_args()
main(args)
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
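# Minimal sketch (not part of the original pipeline): the preprocessing above
# resizes to a multiple of 8, scales pixels to [0, 1], moves to NCHW layout,
# then maps the value range to [-1, 1].
def _demo_image_normalization() -> None:
    import numpy as np
    import PIL.Image

    img = PIL.Image.new("RGB", (37, 21), color=(255, 0, 0))
    w, h = (x - x % 8 for x in img.size)
    arr = np.array(img.resize((w, h)))[None, :].astype(np.float32) / 255.0
    arr = arr.transpose(0, 3, 1, 2)  # NHWC -> NCHW
    arr = 2.0 * arr - 1.0
    assert arr.shape == (1, 3, 16, 32)
    assert arr.min() >= -1.0 and arr.max() <= 1.0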
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
        mask[mask < 0.5] = 0  # binarize: values below 0.5 become 0
        mask[mask >= 0.5] = 1  # values at or above 0.5 become 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
| 24 | 1 |
'''simple docstring'''
UpperCAmelCase_ : Dict = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
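# Hedged sketch (not part of the original module): a pin table like the one
# above is typically consumed by packaging tooling; the lookups below use the
# mapping defined in this module.
def _demo_pin_lookup() -> None:
    required = ["torch", "transformers", "Pillow"]
    pins = [UpperCAmelCase_[name] for name in required]
    assert pins == ["torch>=1.4", "transformers>=4.25.1", "Pillow"]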
| 24 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda x : -x[0] )
]
return result
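# Minimal sketch (not part of the original pipeline): the postprocessing above
# softmaxes the per-image logits over the candidate labels and sorts them into
# (score, label) records, highest probability first.
def _demo_label_scoring() -> None:
    import torch

    logits = torch.tensor([2.0, 0.5, -1.0])
    scores = logits.softmax(dim=-1).tolist()
    labels = ["cat", "dog", "car"]
    ranked = [
        {"score": score, "label": label}
        for score, label in sorted(zip(scores, labels), key=lambda x: -x[0])
    ]
    assert ranked[0]["label"] == "cat"
    assert abs(sum(r["score"] for r in ranked) - 1.0) < 1e-6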
| 24 | 1 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
UpperCAmelCase_ : Any = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def _UpperCamelCase (_lowerCamelCase : Any )-> str:
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
UpperCAmelCase_ : Tuple = parser.parse_args()
if args.check_lib:
UpperCAmelCase_ : Any = importlib.import_module('''transformers''')
UpperCAmelCase_ : Any = Path(transformers_module.__file__).parent
else:
UpperCAmelCase_ : List[Any] = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
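# Minimal sketch (not part of the original module): the lazy-module pattern
# above defers heavy imports until an attribute is first touched. This demo
# uses only the stdlib (PEP 562 module __getattr__), not the real _LazyModule.
def _demo_lazy_attribute_access() -> None:
    import importlib
    import types

    mod = types.ModuleType("fake_pkg")

    def lazy_getattr(name):
        if name == "json_tools":
            return importlib.import_module("json")  # imported on first access
        raise AttributeError(name)

    mod.__getattr__ = lazy_getattr
    assert mod.json_tools.dumps({"a": 1}) == '{"a": 1}'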
| 24 | 1 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Any = ['''audio_values''', '''audio_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=2048 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=[16, 16] , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=4_4100 , __SCREAMING_SNAKE_CASE=86 , __SCREAMING_SNAKE_CASE=2048 , __SCREAMING_SNAKE_CASE=0.0 , **__SCREAMING_SNAKE_CASE , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = spectrogram_length
__snake_case = num_channels
__snake_case = patch_size
__snake_case = feature_size // self.patch_size[1]
__snake_case = n_fft
__snake_case = sampling_rate // hop_length_to_sampling_rate
__snake_case = sampling_rate
__snake_case = padding_value
__snake_case = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> np.ndarray:
'''simple docstring'''
__snake_case = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
__snake_case = log_spec[:, :-1]
__snake_case = log_spec - 20.0
__snake_case = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__snake_case = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__snake_case = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__snake_case = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__snake_case = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__snake_case = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__snake_case = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__snake_case = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__snake_case = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__snake_case = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__snake_case = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa )
# convert into correct format for padding
__snake_case = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__snake_case = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__snake_case = padded_audio_features * self.padding_value
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
__snake_case = audio_features[i]
__snake_case = feature
# return as BatchFeature
if return_attention_mask:
__snake_case = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
__snake_case = {'''audio_values''': padded_audio_features}
__snake_case = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_inputs
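# Worked sketch (not part of the original feature extractor): the log-mel
# scaling above shifts the spectrogram by -20 dB and then maps an [-80, 0] dB
# window linearly onto [-1, 1] via clip(x / 40, -2, 0) + 1.
def _demo_db_normalization() -> None:
    import numpy as np

    log_spec = np.array([-80.0, -40.0, 0.0])
    scaled = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
    assert np.allclose(scaled, [-1.0, 0.0, 1.0])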
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
__snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) )
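# Self-contained sketch (not part of the original module): the iterative
# n % 10 / n //= 10 loop above computes the same digit sum as summing the
# characters of the decimal string.
def _demo_digit_sum() -> None:
    def iterative(n: int) -> int:
        n, total = abs(n), 0
        while n > 0:
            total += n % 10
            n //= 10
        return total

    for n in (0, 7, -123, 262144):
        assert iterative(n) == sum(int(c) for c in str(abs(n)))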
def _UpperCamelCase ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase : Callable , _lowerCamelCase : int ) -> None:
__snake_case = f'''{func.__name__}({value})'''
__snake_case = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
print(f'''{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
                while len(_lowerCamelCase ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
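# Self-contained sketch (not part of the original module): the precedence rule
# driving the infix-to-postfix conversion above, without the table printing.
# Note the explicit "(" guard before the priority lookup on the stack top.
def _demo_shunting_yard() -> None:
    priority = {"^": 3, "*": 2, "/": 2, "%": 2, "+": 1, "-": 1}

    def to_postfix(infix: str) -> str:
        stack, out = [], []
        for ch in infix:
            if ch.isalnum():
                out.append(ch)
            elif ch == "(":
                stack.append(ch)
            elif ch == ")":
                while stack[-1] != "(":
                    out.append(stack.pop())
                stack.pop()
            else:
                while stack and stack[-1] != "(" and priority[ch] <= priority[stack[-1]]:
                    out.append(stack.pop())
                stack.append(ch)
        while stack:
            out.append(stack.pop())
        return "".join(out)

    assert to_postfix("a+b*(c^d-e)") == "abcd^e-*+"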
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 24 | 1 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> Optional[Any]:
'''simple docstring'''
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] )-> int:
'''simple docstring'''
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__snake_case = features.copy() if features else default_expected_features
__snake_case = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__snake_case = features.copy() if features else default_expected_features
__snake_case = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] )-> Any:
'''simple docstring'''
__snake_case = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__snake_case = features.copy()
__snake_case = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case = tmp_path / '''cache'''
__snake_case = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : str )-> int:
'''simple docstring'''
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__snake_case = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int )-> List[str]:
'''simple docstring'''
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case = [jsonl_path]
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__snake_case = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : Optional[int]=("train",) )-> Tuple:
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
for split in splits:
__snake_case = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] )-> str:
'''simple docstring'''
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict )-> List[str]:
'''simple docstring'''
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__snake_case = features.copy() if features else default_expected_features
__snake_case = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case = JsonDatasetReader({'''train''': jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str )-> int:
'''simple docstring'''
if split:
__snake_case = {split: jsonl_path}
else:
__snake_case = '''train'''
__snake_case = {'''train''': jsonl_path, '''test''': jsonl_path}
__snake_case = tmp_path / '''cache'''
__snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__snake_case = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _UpperCamelCase (_lowerCamelCase : List[Any] )-> str:
'''simple docstring'''
return json.load(_lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
return [json.loads(_lowerCamelCase ) for line in buffer]
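# Minimal sketch (not part of the original tests): the two on-disk layouts the
# loaders above handle. With lines=True the writer emits one JSON object per
# line; with lines=False it emits a single JSON document.
def _demo_json_lines_roundtrip() -> None:
    rows = [{"col_1": "a", "col_2": 1}, {"col_1": "b", "col_2": 2}]
    buffer = io.StringIO("\n".join(json.dumps(row) for row in rows))
    assert [json.loads(line) for line in buffer] == rows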
class lowerCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE ).write()
buffer.seek(0 )
__snake_case = load_json_function(__SCREAMING_SNAKE_CASE )
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert isinstance(exported_content[0] , __SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE , orient=__SCREAMING_SNAKE_CASE ).write()
buffer.seek(0 )
__snake_case = load_json(__SCREAMING_SNAKE_CASE )
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__SCREAMING_SNAKE_CASE , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__SCREAMING_SNAKE_CASE ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE , num_proc=2 ).write()
buffer.seek(0 )
__snake_case = load_json_function(__SCREAMING_SNAKE_CASE )
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert isinstance(exported_content[0] , __SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE , orient=__SCREAMING_SNAKE_CASE , num_proc=2 ).write()
buffer.seek(0 )
__snake_case = load_json(__SCREAMING_SNAKE_CASE )
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__SCREAMING_SNAKE_CASE , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__SCREAMING_SNAKE_CASE ) == 10
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
with pytest.raises(__SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__snake_case = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , compression=__SCREAMING_SNAKE_CASE ).write()
with fsspec.open(__SCREAMING_SNAKE_CASE , '''rb''' , compression='''infer''' ) as f:
__snake_case = f.read()
with fsspec.open(__SCREAMING_SNAKE_CASE , '''rb''' , compression='''infer''' ) as f:
__snake_case = f.read()
assert exported_content == original_content
| 24 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
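# Worked sketch (not part of the original config): the hidden_size set above
# doubles the embedding dimension once per stage after the first, so the
# defaults give 96 * 2**3 = 768 channels after the last stage.
def _demo_swin_hidden_size() -> None:
    embed_dim, depths = 96, [2, 2, 6, 2]
    assert int(embed_dim * 2 ** (len(depths) - 1)) == 768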
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> tuple[complex, complex]:
'''simple docstring'''
if a == 0:
raise ValueError('''Coefficient \'a\' must not be zero.''' )
__snake_case = b * b - 4 * a * c
__snake_case = (-b + sqrt(_lowerCamelCase )) / (2 * a)
__snake_case = (-b - sqrt(_lowerCamelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
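# Quick sketch (not part of the original module): with a negative discriminant,
# cmath.sqrt keeps the roots complex instead of raising, e.g. x**2 + 1 = 0 has
# roots +/- 1j.
def _demo_complex_roots() -> None:
    a, b, c = 1, 0, 1
    discriminant = b * b - 4 * a * c
    roots = {(-b + sqrt(discriminant)) / (2 * a), (-b - sqrt(discriminant)) / (2 * a)}
    assert roots == {1j, -1j}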
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case , __snake_case = quadratic_roots(a=5 , b=6 , c=1 )
print(f'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
| 24 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
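# Self-contained sketch (not part of the original module): the H:MM:SS
# formatting above pads minutes and seconds to two digits and only prepends the
# hour field once a full hour has elapsed.
def _demo_format_time() -> None:
    def fmt(t: float) -> str:
        t = int(t)
        h, m, s = t // 3600, (t // 60) % 60, t % 60
        return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"

    assert fmt(75) == "01:15"
    assert fmt(3725) == "1:02:05"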
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
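        # Throttled redraw: the bar repaints unconditionally for the first `warmup` calls, then only
        # when `value` has advanced by `wait_for` items (recomputed below from the measured average
        # time per item so the display refreshes roughly every `update_every` seconds) or on `force_update`.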
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 1 |
'''simple docstring'''
from __future__ import annotations
UpperCAmelCase_ : Union[str, Any] = [True] * 1_0_0_0_0_0_1
UpperCAmelCase_ : Optional[Any] = 2
while i * i <= 1_0_0_0_0_0_0:
if seive[i]:
for j in range(i * i, 1_0_0_0_0_0_1, i):
UpperCAmelCase_ : Any = False
i += 1
def _UpperCamelCase (_lowerCamelCase : int )-> bool:
'''simple docstring'''
return seive[n]
def _UpperCamelCase (_lowerCamelCase : int )-> bool:
'''simple docstring'''
return any(digit in '''02468''' for digit in str(_lowerCamelCase ) )
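# Pruning rationale: some rotation of a number containing an even digit ends in that digit and is
# therefore divisible by 2, so such candidates (other than 2 itself) can never be circular primes.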
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> list[int]:
'''simple docstring'''
__snake_case = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(_lowerCamelCase ) and not contains_an_even_digit(_lowerCamelCase ):
__snake_case = str(_lowerCamelCase )
__snake_case = [int(str_num[j:] + str_num[:j] ) for j in range(len(_lowerCamelCase ) )]
if all(is_prime(_lowerCamelCase ) for i in list_nums ):
result.append(_lowerCamelCase )
return result
def _UpperCamelCase ()-> int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""")
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
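# This computes A(d): the least k for which the repunit R(k) = (10**k - 1) / 9 is divisible by d,
# iterating R(k) mod d instead of building huge integers. e.g. the smallest repunit divisible by 7
# is R(6) = 111111 = 7 * 15873.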
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> int:
'''simple docstring'''
__snake_case = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
__snake_case = DetaConfig(
backbone_config=_lowerCamelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=_lowerCamelCase , with_box_refine=_lowerCamelCase , two_stage=_lowerCamelCase , )
# set labels
__snake_case = '''huggingface/label-files'''
if "o365" in model_name:
__snake_case = 3_66
__snake_case = '''object365-id2label.json'''
else:
__snake_case = 91
__snake_case = '''coco-detection-id2label.json'''
__snake_case = num_labels
__snake_case = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__snake_case = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Any )-> List[Any]:
'''simple docstring'''
__snake_case = dct.pop(_lowerCamelCase )
__snake_case = val
def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : Dict )-> Union[str, Any]:
'''simple docstring'''
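    # Swin doubles the channel dimension at each stage, so stage i has embed_dim * 2**i features.
    # The original checkpoint stores one fused qkv projection per block, which is split below into
    # separate query / key / value tensors.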
__snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__snake_case = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
__snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
            __snake_case = in_proj_weight[:dim, :]
            __snake_case = in_proj_bias[:dim]
            __snake_case = in_proj_weight[dim : dim * 2, :]
            __snake_case = in_proj_bias[dim : dim * 2]
            __snake_case = in_proj_weight[-dim:, :]
            __snake_case = in_proj_bias[-dim:]
# fmt: on
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] )-> Any:
'''simple docstring'''
__snake_case = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
__snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__snake_case = in_proj_weight[:hidden_size, :]
__snake_case = in_proj_bias[:hidden_size]
        __snake_case = in_proj_weight[hidden_size : hidden_size * 2, :]
__snake_case = in_proj_bias[hidden_size : hidden_size * 2]
__snake_case = in_proj_weight[-hidden_size:, :]
__snake_case = in_proj_bias[-hidden_size:]
def _UpperCamelCase ()-> Optional[Any]:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = get_deta_config(_lowerCamelCase )
# load original state dict
if model_name == "deta-swin-large":
__snake_case = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
__snake_case = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(_lowerCamelCase , param.shape )
# rename keys
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_swin_q_k_v(_lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCamelCase , _lowerCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__snake_case = state_dict.pop(_lowerCamelCase )
__snake_case = val
if "input_proj" in key:
__snake_case = state_dict.pop(_lowerCamelCase )
__snake_case = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__snake_case = state_dict.pop(_lowerCamelCase )
__snake_case = val
# finally, create HuggingFace model and load state dict
__snake_case = DetaForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(_lowerCamelCase )
# load image processor
__snake_case = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
__snake_case = prepare_img()
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' )
__snake_case = encoding['''pixel_values''']
__snake_case = model(pixel_values.to(_lowerCamelCase ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__snake_case = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
__snake_case = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
__snake_case = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
__snake_case = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_lowerCamelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_lowerCamelCase ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
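    # Collect the dataframe one partition at a time, in the requested order, and build the
    # (row_id, row_dict) pairs (ids look like '{part_id}_{row_idx}') that the iterable under test
    # is expected to yield.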
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> float:
'''simple docstring'''
__snake_case = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
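    # e.g. first_term=1, common_diff=1, num_of_terms=10 -> (10 / 2) * (2 + 9) = 55.0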
return total
def _UpperCamelCase ()-> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : List[Any] = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=False )-> Union[str, Any]:
'''simple docstring'''
try:
__snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case = strtobool(_lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCAmelCase_ : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('''test requires faiss''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[str]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('''test requires regex''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__snake_case = unittest.skip('''test requires PyTorch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
if not config.TF_AVAILABLE:
__snake_case = unittest.skip('''test requires TensorFlow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
__snake_case = unittest.skip('''test requires JAX''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
if not config.PIL_AVAILABLE:
__snake_case = unittest.skip('''test requires Pillow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Tuple:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> str:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Dict:
'''simple docstring'''
def _require_spacy_model(_lowerCamelCase : int ):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowerCamelCase ) )(_lowerCamelCase )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase (_lowerCamelCase : str )-> Dict:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case = unittest.skip('''test is slow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> Optional[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__snake_case = unittest.skip('''test is local''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : str )-> int:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case = unittest.skip('''test is packaged''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case = unittest.skip('''test requires remote''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (*_lowerCamelCase : str )-> Optional[int]:
'''simple docstring'''
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
__snake_case = decorator(_lowerCamelCase )
setattr(cls , _lowerCamelCase , _lowerCamelCase )
return cls
return decorate
class lowerCAmelCase ( __lowerCAmelCase):
pass
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : List[str] = 0
__lowercase : Dict = 1
__lowercase : List[Any] = 2
@contextmanager
def _UpperCamelCase (_lowerCamelCase : Dict=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : Optional[int]=1E-16 )-> Tuple:
'''simple docstring'''
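    # Three simulation modes: CONNECTION_FAILS patches requests to raise a ConnectionError right away,
    # CONNECTION_TIMES_OUT routes requests to an unroutable address (10.255.255.1) so every call hits
    # the given timeout, and HF_DATASETS_OFFLINE_SET_TO_1 simply flips the datasets offline config flag.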
__snake_case = requests.Session().request
def timeout_request(_lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : str , **_lowerCamelCase : Any ):
# Change the url to an invalid url so that the connection hangs
__snake_case = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
__snake_case = timeout
try:
return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case = url
__snake_case = e.args[0]
__snake_case = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
__snake_case = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , **_lowerCamelCase : Dict ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=_lowerCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowerCamelCase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def _UpperCamelCase (*_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str] )-> Any:
'''simple docstring'''
__snake_case = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase ) as tmp_dir:
try:
os.chdir(_lowerCamelCase )
yield
finally:
os.chdir(_lowerCamelCase )
@contextmanager
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int )-> Any:
'''simple docstring'''
return deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : int , *_lowerCamelCase : int , **_lowerCamelCase : Optional[int] ):
try:
return func(*_lowerCamelCase , **_lowerCamelCase )
except HTTPError as err:
if str(_lowerCamelCase ).startswith('''500''' ) or str(_lowerCamelCase ).startswith('''502''' ):
pytest.xfail(str(_lowerCamelCase ) )
raise err
return decorator.decorator(_wrapper , _lowerCamelCase )
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = returncode
__snake_case = stdout
__snake_case = stderr
async def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] )-> Dict:
'''simple docstring'''
while True:
__snake_case = await stream.readline()
if line:
callback(_lowerCamelCase )
else:
break
async def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Dict=False , _lowerCamelCase : List[Any]=False )-> _RunOutput:
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(_lowerCamelCase ) )
__snake_case = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case = []
__snake_case = []
def tee(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Dict="" ):
__snake_case = line.decode('''utf-8''' ).rstrip()
sink.append(_lowerCamelCase )
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label='''stderr:''' ) ),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[Any]=1_80 , _lowerCamelCase : Dict=False , _lowerCamelCase : int=True )-> _RunOutput:
'''simple docstring'''
__snake_case = asyncio.get_event_loop()
__snake_case = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase ) )
__snake_case = ''' '''.join(_lowerCamelCase )
if result.returncode > 0:
__snake_case = '''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def _UpperCamelCase ()-> Dict:
'''simple docstring'''
__snake_case = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
__snake_case = re.sub(R'''^gw''' , '''''' , _lowerCamelCase , 0 , re.M )
return int(_lowerCamelCase )
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
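    # Derive a deterministic per-worker port (29500 + the pytest-xdist worker id) so parallel test
    # workers that need to bind a port do not collide with each other.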
__snake_case = 2_95_00
__snake_case = pytest_xdist_worker_id()
return port + uniq_delta
| 24 | 1 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> Dict:
'''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_lowerCamelCase ))
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
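    # Numerically stable softmax: subtracting the per-row maximum before exponentiating avoids
    # overflow in np.exp without changing the result.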
    __snake_case = np.max(_lowerCamelCase , axis=-1 , keepdims=_lowerCamelCase )
    __snake_case = np.exp(_lowerCamelCase - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_lowerCamelCase )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : int = '''sigmoid'''
__lowercase : str = '''softmax'''
__lowercase : int = '''none'''
@add_end_docstrings(
__lowerCAmelCase , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = False
__lowercase : Dict = ClassificationFunction.NONE
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="" , **__SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__snake_case = tokenizer_kwargs
__snake_case = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
__snake_case = self.model.config.return_all_scores
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or top_k is None:
__snake_case = top_k
__snake_case = False
elif return_all_scores is not None:
warnings.warn(
                '''`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , __SCREAMING_SNAKE_CASE , )
if return_all_scores:
__snake_case = None
else:
__snake_case = 1
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__snake_case = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = super().__call__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__snake_case = '''top_k''' not in kwargs
if isinstance(args[0] , __SCREAMING_SNAKE_CASE ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]:
'''simple docstring'''
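        # `inner_table` holds [column_names, *rows]; while only the header row exists, newly seen keys
        # can still extend the column list before the first data row is appended.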
__snake_case = self.framework
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.tokenizer(**__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , __SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return self.model(**__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
'''simple docstring'''
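        # Default scoring function: sigmoid for multi-label problems (or a single logit), softmax for
        # single-label multi-class problems; otherwise the raw logits are passed through unchanged.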
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
__snake_case = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
__snake_case = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
__snake_case = self.model.config.function_to_apply
else:
__snake_case = ClassificationFunction.NONE
__snake_case = model_outputs['''logits'''][0]
__snake_case = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
__snake_case = sigmoid(__SCREAMING_SNAKE_CASE )
elif function_to_apply == ClassificationFunction.SOFTMAX:
__snake_case = softmax(__SCREAMING_SNAKE_CASE )
elif function_to_apply == ClassificationFunction.NONE:
__snake_case = outputs
else:
raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
__snake_case = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(__SCREAMING_SNAKE_CASE )
]
if not _legacy:
dict_scores.sort(key=lambda __SCREAMING_SNAKE_CASE : x["score"] , reverse=__SCREAMING_SNAKE_CASE )
if top_k is not None:
__snake_case = dict_scores[:top_k]
return dict_scores
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
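    # Dynamic programming over (n, k): memo[n][k] counts the partitions of n into parts of size at
    # most k + 1, via memo[n][k] = memo[n][k - 1] + memo[n - (k + 1)][k].
    # e.g. partition(5) == 7: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.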
__snake_case = [[0 for _ in range(_lowerCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__snake_case = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ : List[str] = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase_ : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(snake_case ) == len(snake_case ), f'''{len(snake_case )} != {len(snake_case )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
SCREAMING_SNAKE_CASE__ : List[str] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
SCREAMING_SNAKE_CASE__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
try:
__magic_name__ :List[Any] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
f''' {n_student}''' )
return list(range(snake_case ) )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(snake_case ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __lowercase ( snake_case, snake_case = "student", snake_case = None, snake_case = None, snake_case=False, snake_case=None, snake_case=None, **snake_case, ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(snake_case, snake_case ):
AutoTokenizer.from_pretrained(snake_case ).save_pretrained(snake_case ) # purely for convenience
__magic_name__ :str = AutoModelForSeqaSeqLM.from_pretrained(snake_case ).eval()
else:
assert isinstance(snake_case, snake_case ), f'''teacher must be a model or string got type {type(snake_case )}'''
__magic_name__ :List[str] = teacher.config.to_diff_dict()
try:
__magic_name__ , __magic_name__ :List[str] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__magic_name__ :Union[str, Any] = teacher_e
if d is None:
__magic_name__ :int = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config, '''num_encoder_layers''' ):
__magic_name__ , __magic_name__ :List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__magic_name__ , __magic_name__ :List[Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__magic_name__ :Optional[Any] = teacher_e
if d is None:
__magic_name__ :Dict = teacher_d
if hasattr(teacher.config, '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(snake_case )
# Copy weights
__magic_name__ :Dict = teacher.config_class(**snake_case )
__magic_name__ :Optional[Any] = AutoModelForSeqaSeqLM.from_config(snake_case )
# Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
__magic_name__ :List[str] = student.load_state_dict(teacher.state_dict(), strict=snake_case )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__magic_name__ , __magic_name__ :int = list(range(snake_case ) ), list(range(snake_case ) )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
f''' {save_path}''' )
student.save_pretrained(snake_case )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__magic_name__ :List[int] = pick_layers_to_copy(snake_case, snake_case )
if d_layers_to_copy is None:
__magic_name__ :List[int] = pick_layers_to_copy(snake_case, snake_case )
try:
if hasattr(
snake_case, '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, snake_case )
copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, snake_case )
else:
copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, snake_case )
copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, snake_case )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block, student.encoder.block, snake_case )
copy_layers(teacher.decoder.block, student.decoder.block, snake_case )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
__magic_name__ :Optional[int] = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(snake_case )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 0 |
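The script relies on the hardcoded LAYERS_TO_COPY tables above; as a sketch, an evenly spaced fallback (my own illustration, not the script's actual tables) paired with the copy step could look like this:

```python
import torch.nn as nn

def pick_layers_evenly(n_student: int, n_teacher: int) -> list[int]:
    # Spread n_student picks across [0, n_teacher - 1], keeping the first and last layer.
    if n_student == 1:
        return [0]
    step = (n_teacher - 1) / (n_student - 1)
    return [round(i * step) for i in range(n_student)]

def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: list[int]) -> None:
    chosen = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(chosen), f"{len(dest_layers)} != {len(chosen)}"
    dest_layers.load_state_dict(chosen.state_dict())

print(pick_layers_evenly(4, 12))  # [0, 4, 7, 11] -- close to the table's [0, 4, 8, 11]
```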
'''simple docstring'''
import argparse
import os
import re
UpperCAmelCase_ : List[str] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase_ : Tuple = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCAmelCase_ : Dict = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : bool = False )-> str:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__snake_case = f.read()
__snake_case = content.split('''\n''' )
__snake_case = []
__snake_case = 0
while line_idx < len(_lowerCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__snake_case = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__snake_case = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__snake_case = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : _re_identifier.search(_lowerCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(_lowerCamelCase ) )
elif "\n".join(_lowerCamelCase ) != content:
return True
def _UpperCamelCase (_lowerCamelCase : bool = False )-> Tuple:
'''simple docstring'''
__snake_case = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for f in os.listdir(_lowerCamelCase ) if f.endswith('''.py''' )]
__snake_case = [sort_auto_mapping(_lowerCamelCase , overwrite=_lowerCamelCase ) for fname in fnames]
if not overwrite and any(_lowerCamelCase ):
__snake_case = [f for f, d in zip(_lowerCamelCase , _lowerCamelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_lowerCamelCase )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 24 | 0 |
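The heart of the sorter is extracting the first quoted identifier from each block and sorting blocks by it. A standalone illustration with made-up mapping lines:

```python
import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')

blocks = [
    '        ("roberta", "RobertaModel"),',
    '        ("albert", "AlbertModel"),',
    '        ("bert", "BertModel"),',
]
blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
print([b.strip() for b in blocks])  # albert, bert, roberta -- alphabetical by model key
```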
def _A ( ) -> list[list[int]]:
"""simple docstring"""
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
__snake_case = generate_large_matrix()
__snake_case = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _A ( _lowercase ) -> None:
"""simple docstring"""
assert all(row == sorted(_lowercase , reverse=_lowercase ) for row in grid )
assert all(list(_lowercase ) == sorted(_lowercase , reverse=_lowercase ) for col in zip(*_lowercase ) )
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCamelCase = (left + right) // 2
__UpperCamelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCamelCase = mid + 1
else:
__UpperCamelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_lowercase )
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = len(grid[0] )
for i in range(len(_lowercase ) ):
__UpperCamelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(_lowercase ) * len(grid[0] )) - total
def _A ( _lowercase ) -> int:
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
for row in grid:
for i, number in enumerate(_lowercase ):
if number < 0:
total += len(_lowercase ) - i
break
return total
def _A ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('Running benchmarks' )
__UpperCamelCase = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCamelCase = timeit(f'''{func}(grid=grid)''' , setup=_lowercase , number=500 )
print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 1 |
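De-masked for reference, the binary-search counter works like this on the first test grid above; note how the per-row bound only shrinks, since columns are sorted non-increasing too:

```python
def find_negative_index(array: list[int]) -> int:
    # array is sorted non-increasing; return the index of the first negative value.
    if not array or array[0] < 0:
        return 0
    left, right = 0, len(array) - 1
    while right + 1 > left:
        mid = (left + right) // 2
        if array[mid] < 0 and array[mid - 1] >= 0:
            return mid
        if array[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(array)  # no negatives

grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
bound, total = len(grid[0]), 0
for row in grid:
    bound = find_negative_index(row[:bound])
    total += bound
print(len(grid) * len(grid[0]) - total)  # 8 negatives
```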
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase (*_lowerCamelCase : str , _lowerCamelCase : Optional[Union[Dict, Any]] = None , _lowerCamelCase : List[Any]=True , _lowerCamelCase : str=2 )-> str:
'''simple docstring'''
from .. import __version__
__snake_case = take_from
__snake_case = ()
if not isinstance(args[0] , _lowerCamelCase ):
__snake_case = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
__snake_case = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
__snake_case = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
__snake_case = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
__snake_case = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case = call_frame.filename
__snake_case = call_frame.lineno
__snake_case = call_frame.function
__snake_case , __snake_case = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
| 24 | 0 |
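Assuming the helper above is importable as `deprecate`, a typical call site that retires a renamed keyword argument might look like the following (the argument names and version are hypothetical; note the helper raises on any leftover keys in `take_from`, so only deprecated kwargs should land there):

```python
def resize(factor=1.0, **deprecated_kwargs):
    # Hypothetical: `scale` was renamed to `factor`; accept and warn until version 99.0.0.
    scale = deprecate("scale", "99.0.0", "Pass `factor` instead of `scale`.", take_from=deprecated_kwargs)
    if scale is not None:
        factor = scale
    return factor

resize(scale=2.0)   # emits a deprecation warning, returns 2.0
resize(factor=3.0)  # no warning, returns 3.0
```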
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> Dict:
# A local function to see if a dot lands in the circle.
def is_in_circle(_snake_case :float , _snake_case :float ) -> bool:
_A = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_snake_case ) )
# The ratio of the area for circle to square is pi/4.
_A = proportion * 4
print(F'''The estimated value of pi is {pi_estimate}''' )
print(F'''The value of math.pi is {pi}''' )
print(F'''The total error is {abs(pi - pi_estimate )}''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :Callable[[float], float] , _snake_case :float = 0.0 , _snake_case :float = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(_snake_case , _snake_case ) ) for _ in range(_snake_case ) ) * (max_value - min_value)
def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :float = 0.0 , _snake_case :float = 1.0 ) -> None:
def identity_function(_snake_case :float ) -> float:
return x
_A = area_under_curve_estimator(
_snake_case , _snake_case , _snake_case , _snake_case )
_A = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {expected_value}''' )
print(F'''Total error is {abs(estimated_value - expected_value )}''' )
print('''******************''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> None:
def function_to_integrate(_snake_case :float ) -> float:
return sqrt(4.0 - x * x )
_A = area_under_curve_estimator(
_snake_case , _snake_case , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {pi}''' )
print(F'''Total error is {abs(estimated_value - pi )}''' )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 |
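Stripped to its core, the pi estimator above is just an area ratio. A compact de-masked sketch:

```python
from random import uniform

def estimate_pi(samples: int) -> float:
    # Count random points in the 2x2 square that land inside the unit circle.
    inside = sum(
        uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1.0
        for _ in range(samples)
    )
    return 4.0 * inside / samples  # circle/square area ratio is pi/4

print(estimate_pi(100_000))  # typically within ~0.01 of math.pi
```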
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__snake_case = config.depths[-1] - config.num_metaad_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 256
__snake_case = 224
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 1000)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 0 |
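A stripped-down sketch of the rename-then-rebuild pattern the converter uses; only the `patch_embed` renames are reproduced, with a dict lookup standing in for the chained `replace` calls:

```python
def rename_patch_embed_key(old_name: str) -> str:
    mapping = {"0": "convolution1", "1": "batchnorm_before", "3": "convolution2", "4": "batchnorm_after"}
    prefix, layer, rest = old_name.split(".", 2)
    return ".".join([prefix, mapping.get(layer, layer), rest])

def convert_state_dict(checkpoint: dict) -> dict:
    # Re-insert every key under its new name.
    return {
        rename_patch_embed_key(key) if "patch_embed" in key else key: value
        for key, value in checkpoint.items()
    }

assert rename_patch_embed_key("patch_embed.0.weight") == "patch_embed.convolution1.weight"
```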
'''simple docstring'''
def A_( A : int):
if not isinstance(A , int):
UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(A)
if number < 0:
return False
UpperCamelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
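De-masked, the check above tests whether a number is automorphic, i.e. whether its square ends in the number itself:

```python
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:  # compare trailing digits
            return False
        number //= 10
        square //= 10
    return True

assert is_automorphic_number(76)      # 76**2 == 5776
assert not is_automorphic_number(7)   # 7**2 == 49
```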
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ) -> Tuple:
'''simple docstring'''
__snake_case = size if size is not None else {'''shortest_edge''': 20}
__snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_flip_channel_order''' ) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 0 |
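The one processor-specific option exercised here, `do_flip_channel_order`, boils down to reversing the channel axis (RGB -> BGR). A minimal numpy illustration:

```python
import numpy as np

image = np.arange(2 * 2 * 3).reshape(2, 2, 3)  # H x W x C, channels-last
flipped = image[..., ::-1]                     # reverse the channel axis
print(image[0, 0], "->", flipped[0, 0])        # [0 1 2] -> [2 1 0]
```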
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
if not isinstance(_UpperCAmelCase , int ):
lowerCAmelCase = F'Input value of [number={number}] must be an integer'
raise TypeError(_UpperCAmelCase )
if number < 1:
lowerCAmelCase = F'Input value of [number={number}] must be > 0'
raise ValueError(_UpperCAmelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
lowerCAmelCase = int(math.log(number // 3 , 2 ) ) + 2
lowerCAmelCase = [3, 5]
lowerCAmelCase = 2
lowerCAmelCase = 3
for block in range(1 , _UpperCAmelCase ):
for _ in range(_UpperCAmelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
__UpperCamelCase : Optional[int] = 0
try:
__UpperCamelCase : List[str] = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
| 4 |
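The block-doubling generator above can be cross-checked against the definition: m is a Proth number iff m = k * 2**n + 1 with k odd, n >= 1, and k < 2**n. A small verifier:

```python
def is_proth(m: int) -> bool:
    if m < 3 or m % 2 == 0:
        return False
    k, n = m - 1, 0
    while k % 2 == 0:  # factor m - 1 as k * 2**n with k odd
        k //= 2
        n += 1
    return n > 0 and k < 2 ** n

print([m for m in range(2, 60) if is_proth(m)])
# [3, 5, 9, 13, 17, 25, 33, 41, 49, 57]
```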
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "arrow" , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = load_from_cache_file
__snake_case = file_format
__snake_case = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 24 | 0 |
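Hypothetical usage of the reader above (the class name is masked in the dump; in the datasets library it is exported as `SparkDatasetReader`, and a live SparkSession is assumed):

```python
# from datasets.io.spark import SparkDatasetReader  # assumed import path
#
# df = spark.createDataFrame([("hello",), ("world",)], ["text"])
# ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache", file_format="arrow").read()
# print(ds)
```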
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _lowercase , _lowercase = True , _lowercase = None , _lowercase = 32 , _lowercase = True , _lowercase = 1 / 255 , _lowercase = True , _lowercase = True , _lowercase = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowercase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowercase = True , _lowercase=7 , _lowercase=30 , _lowercase=400 , _lowercase=3 , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = do_resize
_lowerCAmelCase = size if size is not None else {"""shortest_edge""": 288}
_lowerCAmelCase = size_divisor
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_normalize
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = image_mean
_lowerCAmelCase = image_std
_lowerCAmelCase = do_pad
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
def _lowercase ( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _lowercase ( self , _lowercase , _lowercase=False ):
"""simple docstring"""
if not batched:
_lowerCAmelCase = self.size["""shortest_edge"""]
_lowerCAmelCase = image_inputs[0]
if isinstance(_lowercase , Image.Image ):
_lowerCAmelCase , _lowerCAmelCase = image.size
else:
_lowerCAmelCase , _lowerCAmelCase = image.shape[1], image.shape[2]
_lowerCAmelCase = size / min(_lowercase , _lowercase )
if h < w:
_lowerCAmelCase , _lowerCAmelCase = size, scale * w
else:
_lowerCAmelCase , _lowerCAmelCase = scale * h, size
_lowerCAmelCase = int((1333 / 800) * size )
if max(_lowercase , _lowercase ) > max_size:
_lowerCAmelCase = max_size / max(_lowercase , _lowercase )
_lowerCAmelCase = newh * scale
_lowerCAmelCase = neww * scale
_lowerCAmelCase , _lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 )
_lowerCAmelCase , _lowerCAmelCase = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_lowerCAmelCase = []
for image in image_inputs:
_lowerCAmelCase , _lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCAmelCase = max(_lowercase , key=lambda _lowercase : item[0] )[0]
_lowerCAmelCase = max(_lowercase , key=lambda _lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowercase : Optional[int] = BridgeTowerImageProcessor if is_vision_available() else None
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = BridgeTowerImageProcessingTester(self )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
self.assertTrue(hasattr(_lowercase , """size_divisor""" ) )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 5 |
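The `get_expected_values` helper implements shortest-edge resizing with a max-size cap and size-divisor snapping. Extracted as a standalone function (defaults mirror the test's 288 / 32 / 1333-over-800 settings):

```python
def expected_resized_shape(
    height: int,
    width: int,
    shortest_edge: int = 288,
    size_divisor: int = 32,
) -> tuple[int, int]:
    scale = shortest_edge / min(height, width)
    if height < width:
        newh, neww = shortest_edge, scale * width
    else:
        newh, neww = scale * height, shortest_edge
    max_size = int((1333 / 800) * shortest_edge)  # 479 for a 288 shortest edge
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_resized_shape(400, 640))  # (288, 448)
```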
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 16384,
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = LEDTokenizer
__lowercase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**__SCREAMING_SNAKE_CASE )
__snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
__snake_case = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__snake_case = value
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''simple docstring'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 24 | 0 |
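The only LED-specific padding logic is extending `global_attention_mask` with -1 ("local attention") to match the padded inputs. In isolation, with made-up token ids:

```python
encoded = {"input_ids": [0, 31414, 2, 1, 1], "global_attention_mask": [1, 0, 0]}
difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
padding_side = "right"
if padding_side == "right":
    encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
else:
    encoded["global_attention_mask"] = [-1] * difference + encoded["global_attention_mask"]
print(encoded["global_attention_mask"])  # [1, 0, 0, -1, -1]
```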
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase_ :
def __init__( self :List[Any] , __A :Dict , __A :Tuple=14 , __A :Optional[int]=7 , __A :Any=True , __A :Tuple=True , __A :List[str]=False , __A :int=True , __A :Optional[Any]=99 , __A :Any=32 , __A :Optional[int]=4 , __A :Optional[Any]=4 , __A :Union[str, Any]=4 , __A :Tuple=37 , __A :Tuple="gelu" , __A :Union[str, Any]=0.1 , __A :List[str]=0.1 , __A :Optional[int]=512 , __A :str=0.0_2 , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = rotary_dim
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = vocab_size - 1
SCREAMING_SNAKE_CASE__ = vocab_size - 1
SCREAMING_SNAKE_CASE__ = vocab_size - 1
def _snake_case ( self :str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def _snake_case ( self :int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _snake_case ( self :int , __A :List[Any] , __A :int , __A :Optional[Any] , __A :int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 20
SCREAMING_SNAKE_CASE__ = model_class_name(__A )
SCREAMING_SNAKE_CASE__ = model.init_cache(input_ids.shape[0] , __A )
SCREAMING_SNAKE_CASE__ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE__ = model(
input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , )
SCREAMING_SNAKE_CASE__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ = model(
input_ids[:, -1:] , attention_mask=__A , past_key_values=outputs_cache.past_key_values , position_ids=__A , )
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def _snake_case ( self :Any , __A :str , __A :int , __A :Any , __A :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 20
SCREAMING_SNAKE_CASE__ = model_class_name(__A )
SCREAMING_SNAKE_CASE__ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE__ = model.init_cache(input_ids.shape[0] , __A )
SCREAMING_SNAKE_CASE__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE__ = model(
input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , )
SCREAMING_SNAKE_CASE__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__A , position_ids=__A , )
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A )
SCREAMING_SNAKE_CASE__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowerCamelCase_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxGPTJModelTester(self )
def _snake_case ( self :int ) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__A , __A , __A , __A )
def _snake_case ( self :int ) -> Optional[int]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__A , __A , __A , __A )
@tooslow
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE__ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=__A , truncation=__A )
SCREAMING_SNAKE_CASE__ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = model.config.eos_token_id
SCREAMING_SNAKE_CASE__ = jax.jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__A , skip_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(__A , __A )
@is_pt_flax_cross_test
def _snake_case ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A )
SCREAMING_SNAKE_CASE__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ = getattr(__A , __A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = pt_model_class(__A ).eval()
SCREAMING_SNAKE_CASE__ = model_class(__A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __A )
SCREAMING_SNAKE_CASE__ = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = pt_model(**__A ).to_tuple()
SCREAMING_SNAKE_CASE__ = fx_model(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
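                # round-trip: save the PyTorch weights and reload them into Flax, then compare outputs again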
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__A )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(__A , from_pt=__A )
SCREAMING_SNAKE_CASE__ = fx_model_loaded(**__A ).to_tuple()
self.assertEqual(
len(__A ) , len(__A ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def _snake_case ( self :Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A )
SCREAMING_SNAKE_CASE__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ = getattr(__A , __A )
SCREAMING_SNAKE_CASE__ = pt_model_class(__A ).eval()
SCREAMING_SNAKE_CASE__ = model_class(__A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ = load_flax_weights_in_pytorch_model(__A , fx_model.params )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = pt_model(**__A ).to_tuple()
SCREAMING_SNAKE_CASE__ = fx_model(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
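                # round-trip: save the Flax weights and reload them into PyTorch, then compare outputs again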
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__A )
SCREAMING_SNAKE_CASE__ = pt_model_class.from_pretrained(__A , from_flax=__A )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = pt_model_loaded(**__A ).to_tuple()
self.assertEqual(
len(__A ) , len(__A ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def _snake_case ( self :Optional[Any] ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE__ = model(np.ones((1, 1) ) )
        self.assertIsNotNone(__A )
| 6 |
'''simple docstring'''
from collections import deque
def tarjan(g )-> list:
    '''simple docstring'''
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def create_graph(n , edges )-> list:
    '''simple docstring'''
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
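    # Tarjan runs in O(V + E); the recursive strong_connect may hit Python's default recursion limit on very large graphs.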
| 24 | 0 |
"""simple docstring"""
def odd_even_sort(input_list : list ) -> list:
    '''simple docstring'''
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
| 7 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(vocab_keys ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='''pt''' )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
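        # fmt: off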
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
def convert_pegasus(tf_weights: dict , cfg_updates: dict ) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if "dense" in k or "proj" in new_k:
            # TF stores dense/projection kernels transposed relative to torch.nn.Linear
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000" ) -> dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str , save_dir: str ) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('''pegasus''', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 8 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 24 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''
def convert_dialogpt_checkpoint(checkpoint_path , pytorch_dump_folder_path ) -> None:
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl')
        pytorch_dump_folder_path = f'./DialoGPT-{MODEL}'
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 9 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image()-> int:
    '''simple docstring'''
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys(config )-> List[Any]:
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new )-> Tuple:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config )-> str:
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Tuple )-> Dict:
'''simple docstring'''
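    # COCO-finetuned checkpoints use 364x364 inputs; all other checkpoints use 224x224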
__snake_case = 3_64 if '''coco''' in model_name else 2_24
__snake_case = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
__snake_case = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
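            # positions equal to the pad token are replaced with -100 so the loss ignores them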
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 | 0 |
from __future__ import annotations
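# Backtracking N-queens solver: place one queen per row, pruning unsafe columns and diagonals; completed boards are collected in the solution list.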
solution = []
def is_safe(board , row , column ):
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve(board , row ):
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard(board ):
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print('''Q''' , end=''' ''' )
            else:
                print('''.''' , end=''' ''' )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 10 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
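        # RePaint loop: when t < t_last take a denoising step; otherwise undo_step re-noises the sample to jump back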
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
| 24 | 0 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=str , default=None , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=str , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=int , default=4 , help='''How much images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=int , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=int , default=0 , help='''cuda_id.''' , )
    args = parser.parse_args()
    return args
def image_grid(imgs , rows , cols):
    """simple docstring"""
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    w , h = imgs[0].size
    grid = Image.new('''RGB''' , size=(cols * w, rows * h))
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img , box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    """simple docstring"""
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 11 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ) -> List[Any]:
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ) -> Tuple:
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ) -> Optional[Any]:
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['''candidate_labels'''] = candidate_labels
        # one hypothesis per candidate label, e.g. "This is a photo of cat."
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ) -> Dict:
        '''simple docstring'''
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ) -> str:
        '''simple docstring'''
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            # probabilities over the candidate labels come from a softmax across the per-image logits
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
| 24 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_altclip"""] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_bigcode'''] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def lowercase_ ( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Any:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Dict = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : Optional[int] = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )
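        # threshold=0.0 keeps every detection so only the output schema is being checked here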
self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE_ , {
'score': ANY(SCREAMING_SNAKE_CASE_ ),
'label': ANY(SCREAMING_SNAKE_CASE_ ),
'box': {'xmin': ANY(SCREAMING_SNAKE_CASE_ ), 'ymin': ANY(SCREAMING_SNAKE_CASE_ ), 'xmax': ANY(SCREAMING_SNAKE_CASE_ ), 'ymax': ANY(SCREAMING_SNAKE_CASE_ )},
} , )
import datasets
__lowerCamelCase : Any = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__lowerCamelCase : str = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__lowerCamelCase : Any = object_detector(SCREAMING_SNAKE_CASE_ , threshold=0.0 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for outputs in batch_outputs:
self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE_ , {
'score': ANY(SCREAMING_SNAKE_CASE_ ),
'label': ANY(SCREAMING_SNAKE_CASE_ ),
'box': {'xmin': ANY(SCREAMING_SNAKE_CASE_ ), 'ymin': ANY(SCREAMING_SNAKE_CASE_ ), 'xmax': ANY(SCREAMING_SNAKE_CASE_ ), 'ymax': ANY(SCREAMING_SNAKE_CASE_ )},
} , )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def lowercase_ ( self ) -> Any:
pass
@require_torch
def lowercase_ ( self ) -> str:
__lowerCamelCase : Union[str, Any] = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__lowerCamelCase : Optional[Any] = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
] , )
__lowerCamelCase : Tuple = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
] , )
@require_torch
@slow
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : int = 'facebook/detr-resnet-50'
__lowerCamelCase : Dict = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
__lowerCamelCase : Dict = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : Optional[int] = 'facebook/detr-resnet-50'
__lowerCamelCase : str = pipeline('object-detection' , model=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
__lowerCamelCase : str = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : List[Any] = 0.9_9_8_5
__lowerCamelCase : Optional[int] = 'facebook/detr-resnet-50'
__lowerCamelCase : int = pipeline('object-detection' , model=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : Optional[Any] = 'Narsil/layoutlmv3-finetuned-funsd'
__lowerCamelCase : Dict = 0.9_9_9_3
__lowerCamelCase : int = pipeline('object-detection' , model=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
] , )
| 13 |
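The tests above exercise the `object-detection` pipeline end to end. A minimal standalone sketch of the same call pattern, assuming network access for the `facebook/detr-resnet-50` checkpoint and the COCO sample image:

```python
from transformers import pipeline

# Illustrative usage mirroring the tests above; the DETR checkpoint also
# needs the `timm` and `Pillow` dependencies installed.
detector = pipeline("object-detection", model="facebook/detr-resnet-50")

# A high threshold keeps only the most confident boxes, as in the
# threshold test above (0.9985 leaves just the two "cat" detections).
results = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    threshold=0.9985,
)
for r in results:
    print(r["score"], r["label"], r["box"])
```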
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
__snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) )
def _UpperCamelCase ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase : Callable , _lowerCamelCase : int ) -> None:
__snake_case = f'''{func.__name__}({value})'''
__snake_case = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
print(f'''{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 0 |
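Stripped of the randomized identifiers, the three digit-sum strategies benchmarked above reduce to a loop, a recursion, and a string comprehension. A de-obfuscated sketch:

```python
def sum_of_digits(n: int) -> int:
    # Iterative: peel off the last digit until nothing is left.
    n, res = abs(n), 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    # Recursive: last digit plus the digit sum of the rest.
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    # String-based: convert and sum the characters.
    return sum(int(c) for c in str(abs(n)))


assert (
    sum_of_digits(262144)
    == sum_of_digits_recursion(262144)
    == sum_of_digits_compact(262144)
    == 19
)
```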
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
a__ = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , _a , _a , _a = None , _a = None ) -> List[Any]:
_a : Union[str, Any] = None
_a : Optional[Any] = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
_a : Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(_a ):
if item not in EXCLUDE_EXAMPLES:
_a : Any = os.path.join(_a , _a )
if os.path.isfile(_a ) and ".py" in item_path:
with self.subTest(
tested_script=_a , feature_script=_a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
_a : Optional[int] = compare_against_test(
os.path.join(_a , _a ) , _a , _a , _a )
_a : Union[str, Any] = '''\n'''.join(_a )
if special_strings is not None:
for string in special_strings:
_a : Union[str, Any] = diff.replace(_a , '''''' )
self.assertEqual(_a , '''''' )
def __lowercase ( self ) -> Optional[Any]:
self.one_complete_example('''complete_nlp_example.py''' , _a )
self.one_complete_example('''complete_nlp_example.py''' , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[int] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
_a : int = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , _a , _a , _a )
self.one_complete_example('''complete_cv_example.py''' , _a , _a , _a )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = False
@classmethod
def __lowercase ( cls ) -> List[Any]:
super().setUpClass()
_a : str = tempfile.mkdtemp()
_a : str = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_a : int = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __lowercase ( cls ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
_a : List[str] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def __lowercase ( self ) -> Any:
_a : Dict = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
_a : str = run_command(self._launch_args + testargs , return_stdout=_a )
self.assertNotIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
_a : Optional[int] = run_command(self._launch_args + testargs , return_stdout=_a )
if torch.cuda.is_available():
_a : List[Any] = torch.cuda.device_count()
else:
_a : Tuple = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
else:
self.assertIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
@slow
def __lowercase ( self ) -> Union[str, Any]:
_a : List[str] = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
_a : Tuple = run_command(self._launch_args + testargs , return_stdout=_a )
_a : int = re.findall('''({.+})''' , _a )
_a : int = [r for r in results if '''accuracy''' in r][-1]
_a : Optional[Any] = ast.literal_eval(_a )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def __lowercase ( self ) -> str:
_a : Optional[int] = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
_a : str = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_a , '''tracking''' ) ) )
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 14 |
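The feature tests above drive the example scripts through `accelerate launch`. A minimal sketch of the same invocation from plain Python; the script path assumes a checkout of the accelerate repository, while `write_basic_config` is the real helper used in `setUpClass`:

```python
import os
import subprocess
import tempfile

from accelerate.utils import write_basic_config

# Write a default config once, as the test class does per suite.
tmpdir = tempfile.mkdtemp()
config_path = os.path.join(tmpdir, "default_config.yml")
write_basic_config(save_location=config_path)

# Equivalent of run_command(self._launch_args + testargs) for the
# checkpointing example.
cmd = [
    "accelerate", "launch", "--config_file", config_path,
    "examples/by_feature/checkpointing.py",
    "--checkpointing_steps", "epoch",
    "--output_dir", tmpdir,
]
subprocess.run(cmd, check=True)
```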
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 24 | 0 |
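Without the tabular trace output, the conversion above is the classic shunting-yard algorithm. A compact sketch using the same precedence table (with an extra guard so operators never compare against an open parenthesis):

```python
def infix_to_postfix(infix: str) -> str:
    priority = {"^": 3, "*": 2, "/": 2, "%": 2, "+": 1, "-": 1}
    stack, postfix = [], []
    for x in infix:
        if x.isalnum():
            postfix.append(x)        # operands pass straight through
        elif x == "(":
            stack.append(x)
        elif x == ")":
            while stack[-1] != "(":  # unwind until the matching "("
                postfix.append(stack.pop())
            stack.pop()
        else:
            while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                postfix.append(stack.pop())
            stack.append(x)
    while stack:
        postfix.append(stack.pop())
    return "".join(postfix)


assert infix_to_postfix("a+b*(c^d-e)") == "abcd^e-*+"
```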
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
A : List[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
| 15 |
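This module exists only as a patching target for `patch_submodule()` tests. The same idea, sketched with the standard library's `mock.patch.object` against the real `os` module:

```python
from unittest import mock

import os as target_module  # stand-in for the test module above

# Temporarily swap an attribute reachable through the module, the way
# patch_submodule() replaces e.g. `os.path.join` inside a module under test.
with mock.patch.object(target_module.path, "join", lambda *parts: "/".join(parts)):
    assert target_module.path.join("a", "b") == "a/b"  # patched behavior
# Outside the context manager the original os.path.join is restored.
```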
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 24 | 0 |
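The derived attributes at the end of `__init__` are easy to verify directly. A small sketch instantiating the config, assuming `transformers` is installed:

```python
from transformers import SwinConfig

config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])

# hidden_size is the channel dimension after the last stage:
# embed_dim * 2 ** (num_stages - 1) = 96 * 2**3
assert config.hidden_size == 768
# One "stem" entry plus one name per stage.
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
```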
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A : int = logging.get_logger(__name__)
__A : str = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def __a ( A__ : Optional[Any] ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE = k.replace(A__ , A__ )
if k.startswith("encoder" ):
SCREAMING_SNAKE_CASE = k.replace(".attn" , ".self_attn" )
SCREAMING_SNAKE_CASE = k.replace("norm1" , "self_attn_layer_norm" )
SCREAMING_SNAKE_CASE = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
SCREAMING_SNAKE_CASE = k.replace("norm1" , "self_attn_layer_norm" )
SCREAMING_SNAKE_CASE = k.replace("norm2" , "encoder_attn_layer_norm" )
SCREAMING_SNAKE_CASE = k.replace("norm3" , "final_layer_norm" )
return k
def __a ( A__ : List[Any] ):
SCREAMING_SNAKE_CASE = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
SCREAMING_SNAKE_CASE = sd.pop(A__ )
SCREAMING_SNAKE_CASE = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
SCREAMING_SNAKE_CASE = v
__A : Tuple = ['START']
@torch.no_grad()
def __a ( A__ : List[Any] , A__ : Union[str, Any] , A__ : Any ):
SCREAMING_SNAKE_CASE = torch.load(A__ , map_location="cpu" )
SCREAMING_SNAKE_CASE = model["model"]
SCREAMING_SNAKE_CASE = BlenderbotConfig.from_json_file(A__ )
SCREAMING_SNAKE_CASE = BlenderbotForConditionalGeneration(A__ )
SCREAMING_SNAKE_CASE = m.model.state_dict().keys()
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
SCREAMING_SNAKE_CASE = rename_state_dict_key(A__ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
SCREAMING_SNAKE_CASE = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A__ )
m.model.load_state_dict(A__ , strict=A__ )
m.half()
m.save_pretrained(A__ )
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
__A : Optional[int] = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 16 |
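The pattern table drives a purely textual key translation. An illustrative restatement of the renaming logic above, checked on a sample encoder key:

```python
# Same PATTERNS table as above, applied with plain str.replace.
PATTERNS = [
    ["attention", "attn"], ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"],
    ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k: str) -> str:
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


assert (
    rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
    == "encoder.layers.0.self_attn.q_proj.weight"
)
```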
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 0 |
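The time formatting near the top of the callback module above is a simple divmod chain. A de-obfuscated sketch of that helper:

```python
def format_time(t: float) -> str:
    # Split elapsed seconds into hours / minutes / seconds.
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


assert format_time(3725) == "1:02:05"
assert format_time(125) == "02:05"
```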
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> str:
def wrapper(*a__ : List[Any] ,**a__ : str ):
__A : List[Any] = timeit.default_timer()
__A : Any = func(*a__ ,**a__ )
__A : Union[str, Any] = timeit.default_timer() - starttime
return delta
__A : Dict = func.__name__
return wrapper
def __SCREAMING_SNAKE_CASE ( a__ : dict ,a__ : Dict=100 ,a__ : Optional[Any]=None ) -> Dict:
__A : Optional[int] = []
__A : Dict = seq_shapes or {}
for i in range(a__ ):
__A : Optional[Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(a__ ,_ArrayXD ):
__A : Any = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(a__ ,datasets.Value ):
if v.dtype == "string":
__A : List[str] = """The small grey turtle was surprisingly fast when challenged."""
else:
__A : int = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(a__ ,datasets.Sequence ):
while isinstance(a__ ,datasets.Sequence ):
__A : str = v.feature
__A : List[Any] = seq_shapes[k]
__A : Dict = np.random.rand(*a__ ).astype(v.dtype )
__A : Any = data
dummy_data.append((i, example) )
return dummy_data
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Optional[int] ,a__ : Any=100 ,a__ : Optional[Any]=None ) -> str:
__A : int = generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__ )
with ArrowWriter(features=a__ ,path=a__ ) as writer:
for key, record in dummy_data:
__A : List[str] = features.encode_example(a__ )
writer.write(a__ )
__A , __A : int = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
__A : Optional[Any] = datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__ ) )
return dataset
| 17 |
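The benchmark helper above boils down to: generate synthetic examples, write them with `ArrowWriter`, reload with `Dataset.from_file`. A minimal sketch of that round trip (the file path is illustrative):

```python
import datasets
from datasets.arrow_writer import ArrowWriter

features = datasets.Features({"text": datasets.Value("string")})
path = "/tmp/bench.arrow"  # illustrative location

with ArrowWriter(features=features, path=path) as writer:
    for i in range(100):
        writer.write(features.encode_example({"text": f"example {i}"}))
    num_examples, num_bytes = writer.finalize()

dataset = datasets.Dataset.from_file(filename=path, info=datasets.DatasetInfo(features=features))
assert len(dataset) == num_examples == 100
```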
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 0 |
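De-obfuscated, the search above finds the smallest repunit length divisible by a given n coprime to 10. A clean sketch:

```python
def least_divisible_repunit(divisor: int) -> int:
    # R(k) = 111...1 (k ones); only meaningful when gcd(divisor, 10) == 1.
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit, k = 1, 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor  # grow the repunit mod divisor
        k += 1
    return k


assert least_divisible_repunit(7) == 6    # 111111 = 7 * 15873
assert least_divisible_repunit(41) == 5   # 11111 = 41 * 271
```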
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __a(SCREAMING_SNAKE_CASE_ : NDArray[floataa] , SCREAMING_SNAKE_CASE_ : NDArray[floataa] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int , ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = coefficient_matrix.shape
_lowerCAmelCase , _lowerCAmelCase = constant_matrix.shape
if rowsa != colsa:
_lowerCAmelCase = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
if colsa != 1:
_lowerCAmelCase = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
if rowsa != rowsa:
_lowerCAmelCase = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
F'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) != rowsa:
_lowerCAmelCase = (
"Number of initial values must be equal to number of rows in coefficient "
F'''matrix but received {len(SCREAMING_SNAKE_CASE_ )} and {rowsa}'''
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
_lowerCAmelCase = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
_lowerCAmelCase , _lowerCAmelCase = table.shape
strictly_diagonally_dominant(SCREAMING_SNAKE_CASE_ )
# Iterates the whole matrix for given number of times
for _ in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = []
for row in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = 0
for col in range(SCREAMING_SNAKE_CASE_ ):
if col == row:
_lowerCAmelCase = table[row][col]
elif col == cols - 1:
_lowerCAmelCase = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
_lowerCAmelCase = (temp + val) / denom
new_val.append(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = new_val
return [float(SCREAMING_SNAKE_CASE_ ) for i in new_val]
def __a(SCREAMING_SNAKE_CASE_ : NDArray[floataa] ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = table.shape
_lowerCAmelCase = True
for i in range(0 , SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
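A worked call of the Jacobi idea above on a strictly diagonally dominant 2x2 system; since the function name in the listing is mangled, this sketch inlines the vectorized update x_new = (b - R @ x) / d instead:

```python
import numpy as np

A = np.array([[4.0, 1.0], [2.0, 5.0]])   # strictly diagonally dominant
b = np.array([9.0, 13.0])
x = np.zeros(2)                           # initial guess

d = np.diag(A)        # diagonal entries
R = A - np.diag(d)    # off-diagonal remainder
for _ in range(50):
    x = (b - R @ x) / d

assert np.allclose(A @ x, b)  # iteration converged to the solution
```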
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 0 |
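The helper at the top of these tests collects rows partition by partition with the `SPARK_PARTITION_ID()` SQL function. A minimal standalone sketch, assuming a local pyspark installation:

```python
import pyspark

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.range(10).repartition(2)

# Pull back rows one physical partition at a time, as the tests do when
# checking shard and shuffle order.
for part_id in range(df.rdd.getNumPartitions()):
    rows = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
    print(part_id, [row.asDict() for row in rows])
```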
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
_a = logging.get_logger(__name__)
_a = {}
_a = {}
_a = {}
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case = None, ) -> int:
"""simple docstring"""
_UpperCamelCase = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
_UpperCamelCase = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
_UpperCamelCase = format_type
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case = None ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_UpperCamelCase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
_a = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
_a = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
_a = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def lowerCamelCase__ ( __snake_case ) -> Optional[str]:
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def lowerCamelCase__ ( __snake_case, **__snake_case ) -> Formatter:
"""simple docstring"""
_UpperCamelCase = get_format_type_from_alias(__snake_case )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**__snake_case )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
| 19 |
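From the user side, the registry above is what resolves the aliases passed to `Dataset.set_format`. A small sketch of the alias round trip, assuming `datasets` and numpy are installed:

```python
from datasets import Dataset
from datasets.formatting import get_format_type_from_alias, get_formatter

# "np" is registered above as an alias of "numpy".
assert get_format_type_from_alias("np") == "numpy"
formatter = get_formatter("np")  # a NumpyFormatter instance

ds = Dataset.from_dict({"x": [1, 2, 3]})
ds.set_format("np")              # the same alias works on a dataset
print(type(ds[0]["x"]))          # numpy scalar instead of a Python int
```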
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> float:
'''simple docstring'''
__snake_case = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _UpperCamelCase ()-> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
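The closed form used above is the standard arithmetic-series sum S_n = n/2 * (2a + (n - 1)d). A clean restatement:

```python
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # S_n = n/2 * (2a + (n - 1) d)
    return (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)


assert sum_of_series(1, 1, 10) == 55.0  # 1 + 2 + ... + 10
```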