# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
    with open(src_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, 'w') as fp:
        fp.write('\n'.join(merges))

    tokenizer = FSMTTokenizer(
        langs=['en', 'ru'],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
    langs=['ru', 'en'],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
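# ---------------------------------------------------------------------------
# Consumption sketch (not part of the original script; the model id is taken
# from the upload comment above, everything else is illustrative): tests load
# the tiny checkpoint like any other hub model and only check shapes/mechanics.
#
# from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
# tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
# batch = tokenizer(["some test input"], return_tensors="pt")
# logits = model(**batch).logits  # values are meaningless by design; only shapes matter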
"""Simple, compound, and annual-percentage-rate interest calculators."""
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Return the simple interest accrued between two payments."""
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return the compound interest earned over the given number of periods."""
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return the interest for a nominal APR compounded daily over whole years."""
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
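# Worked example (illustrative; not from the original module). With principal=500,
# a per-period rate of 0.001 and 10 periods: simple interest is 500 * 0.001 * 10 = 5.0,
# while compounding the same rate gives 500 * ((1 + 0.001) ** 10 - 1), about 5.02,
# slightly more because each period's interest itself earns interest.
#
# assert simple_interest(500.0, 0.001, 10) == 5.0
# assert round(compound_interest(500.0, 0.001, 10), 2) == 5.02
# assert round(apr_interest(1000.0, 0.001, 2), 2) == 2.0  # 0.1% APR, compounded daily, 2 years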
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
A__ : str = logging.get_logger(__name__)
class lowercase__ ( snake_case__ ):
def __init__( self : Optional[Any] , *snake_case__ : int , **snake_case__ : Any ):
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
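# Usage note (a sketch assuming standard `warnings` semantics; not part of the
# original file): instantiating the deprecated class still works but emits a
# FutureWarning, which is how callers can detect they should migrate to
# PerceiverImageProcessor, a drop-in replacement.
#
# import warnings
#
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     extractor = PerceiverFeatureExtractor()
# assert any(issubclass(w.category, FutureWarning) for w in caught)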
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A__ : Tuple = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
A__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
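# Behavior sketch (standard _LazyModule semantics, stated as an assumption): the
# package replaces itself in sys.modules, so ByT5Tokenizer is only imported on
# first attribute access, keeping top-level `import transformers` cheap.
#
# from transformers.models.byt5 import ByT5Tokenizer  # resolves via the lazy module
# tok = ByT5Tokenizer()  # byte-level: needs no vocabulary file
# ids = tok("hi").input_ids  # each UTF-8 byte maps to one id (plus special tokens)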
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list; the head of the list is the top."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
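# Quick usage sketch (illustrative; mirrors the API defined above):
#
# stack: LinkedStack[int] = LinkedStack()
# stack.push(1)
# stack.push(2)
# stack.push(3)
# print(stack)             # 3->2->1 (top of the stack first, per __iter__/__str__)
# print(len(stack))        # 3
# print(stack.peek())      # 3 (no removal)
# print(stack.pop())       # 3 (removes the top node)
# stack.clear()
# print(stack.is_empty())  # True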
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used for XLNet / Transfo-XL to give the model more context to work with.
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
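# End-to-end usage sketch (standard `pipeline` API; the checkpoint name is only an
# example): the return_* kwargs below map onto ReturnType in postprocess above.
#
# from transformers import pipeline
#
# generator = pipeline("text-generation", model="gpt2")
# full = generator("Hello, I'm a language model,", max_new_tokens=20)
# print(full[0]["generated_text"])  # prompt + continuation (ReturnType.FULL_TEXT, the default)
# new = generator("Hello,", return_full_text=False, max_new_tokens=5)  # ReturnType.NEW_TEXT
# ids = generator("Hello,", return_tensors=True)  # ReturnType.TENSORS -> "generated_token_ids"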
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ['image_embeds', 'negative_image_embeds', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
return 100
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.dummy_unet
UpperCAmelCase : List[Any] = self.dummy_movq
UpperCAmelCase : List[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowercase__ , )
UpperCAmelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
UpperCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase__ )
# create hint
UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
if str(lowercase__ ).startswith("""mps""" ):
UpperCAmelCase : Dict = torch.manual_seed(lowercase__ )
else:
UpperCAmelCase : str = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
UpperCAmelCase : Optional[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Dict = """cpu"""
UpperCAmelCase : Tuple = self.get_dummy_components()
UpperCAmelCase : int = self.pipeline_class(**lowercase__ )
UpperCAmelCase : Optional[int] = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(lowercase__ ) )
UpperCAmelCase : Any = output.images
UpperCAmelCase : Any = pipe(
**self.get_dummy_inputs(lowercase__ ) , return_dict=lowercase__ , )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Optional[Any] = np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
UpperCAmelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
UpperCAmelCase : str = torch.from_numpy(np.array(lowercase__ ) ).float() / 255.0
UpperCAmelCase : List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=lowercase__ )
UpperCAmelCase : str = """A robot, 4k photo"""
UpperCAmelCase : Tuple = torch.Generator(device="""cuda""" ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase : int = pipe_prior(
lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCAmelCase : Optional[int] = torch.Generator(device="""cuda""" ).manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipeline(
image_embeds=lowercase__ , negative_image_embeds=lowercase__ , hint=lowercase__ , generator=lowercase__ , num_inference_steps=100 , output_type="""np""" , )
UpperCAmelCase : str = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
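# The two-stage flow exercised by the slow test above, as a standalone sketch
# (checkpoint ids come from the test; `hint` is assumed to be a (1, 3, H, W)
# depth-map tensor in [0, 1], prepared as in the test body):
#
# import torch
# from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
#
# prior = KandinskyV22PriorPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
# ).to("cuda")
# pipe = KandinskyV22ControlnetPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
# ).to("cuda")
# image_embeds, negative_image_embeds = prior("A robot, 4k photo").to_tuple()
# image = pipe(
#     image_embeds=image_embeds, negative_image_embeds=negative_image_embeds,
#     hint=hint, height=512, width=512, num_inference_steps=50,
# ).images[0]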
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
A: List[Any] = get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( enum.Enum ):
__lowerCAmelCase : Dict = 'all_checks'
__lowerCAmelCase : int = 'basic_checks'
__lowerCAmelCase : Optional[Any] = 'no_checks'
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
def _snake_case ( UpperCamelCase : Optional[dict] , UpperCamelCase : dict , UpperCamelCase : int=None ):
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(UpperCamelCase ) - set(UpperCamelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(UpperCamelCase ) - set(UpperCamelCase ) ) )
if len(set(UpperCamelCase ) - set(UpperCamelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(UpperCamelCase ) - set(UpperCamelCase ) ) )
UpperCAmelCase : Tuple = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
UpperCAmelCase : Union[str, Any] = """ for """ + verification_name if verification_name is not None else """"""
if len(UpperCamelCase ) > 0:
raise NonMatchingChecksumError(
F"Checksums didn't match{for_verification_name}:\n"
F"{bad_urls}\n"
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
def _snake_case ( UpperCamelCase : Optional[dict] , UpperCamelCase : dict ):
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(UpperCamelCase ) - set(UpperCamelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(UpperCamelCase ) - set(UpperCamelCase ) ) )
if len(set(UpperCamelCase ) - set(UpperCamelCase ) ) > 0:
raise UnexpectedSplits(str(set(UpperCamelCase ) - set(UpperCamelCase ) ) )
UpperCAmelCase : List[str] = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(UpperCamelCase ) > 0:
raise NonMatchingSplitsSizesError(str(UpperCamelCase ) )
logger.info("""All the splits matched successfully.""" )
def _snake_case ( UpperCamelCase : str , UpperCamelCase : bool = True ):
if record_checksum:
UpperCAmelCase : Dict = shaaaa()
with open(UpperCamelCase , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"""""" ):
m.update(UpperCamelCase )
UpperCAmelCase : Any = m.hexdigest()
else:
UpperCAmelCase : Dict = None
return {"num_bytes": os.path.getsize(UpperCamelCase ), "checksum": checksum}
def _snake_case ( UpperCamelCase : Union[str, Any] ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
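# Behavior sketch with toy inputs (illustrative, not from the module):
#
# expected = {"https://example.com/a.txt": {"num_bytes": 10, "checksum": "abc"}}
# recorded = {"https://example.com/a.txt": {"num_bytes": 10, "checksum": "xyz"}}
# verify_checksums(None, recorded)      # only logs "Unable to verify checksums."
# verify_checksums(expected, expected)  # passes: every url's record matches
# verify_checksums(expected, recorded)  # raises NonMatchingChecksumError (records differ)
# info = get_size_checksum_dict(__file__)  # {"num_bytes": ..., "checksum": "<sha256 hexdigest>"}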
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
__a = tempfile.mkdtemp()
# fmt: off
__a = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__a = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
__a = os.path.join(self.tmpdirname , lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def a__ ( self , **lowerCamelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def a__ ( self , **lowerCamelCase ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def a__ ( self ):
shutil.rmtree(self.tmpdirname )
def a__ ( self ):
__a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ):
__a = self.get_tokenizer()
__a = self.get_image_processor()
__a = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__a = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def a__ ( self ):
__a = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__a = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__a = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = self.prepare_image_inputs()
__a = image_processor(lowerCamelCase , return_tensors="np" )
__a = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = "lower newer"
__a = processor(text=lowerCamelCase )
__a = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = "lower newer"
__a = self.prepare_image_inputs()
__a = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(lowerCamelCase ):
processor()
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(lowerCamelCase )
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = "lower newer"
__a = self.prepare_image_inputs()
__a = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
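# Standalone usage sketch of the processor under test (component checkpoints are
# illustrative; `image` stands for any PIL.Image):
#
# from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor
#
# processor = VisionTextDualEncoderProcessor(
#     image_processor=ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k"),
#     tokenizer=BertTokenizer.from_pretrained("bert-base-uncased"),
# )
# enc = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# # enc holds input_ids / token_type_ids / attention_mask from the tokenizer plus
# # pixel_values from the image processor, as asserted in test_processor above.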
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        config.is_decoder = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args, **kwargs):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
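# Typical downstream use of the classes tested above (a sketch following the
# encoder-decoder recipe from the transformers docs; the decoder flags are the
# usual ones for turning a BertGeneration checkpoint into a causal decoder):
#
# from transformers import BertGenerationDecoder, BertGenerationEncoder, EncoderDecoderModel
#
# encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# decoder = BertGenerationDecoder.from_pretrained(
#     "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True
# )
# model = EncoderDecoderModel(encoder=encoder, decoder=decoder)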
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
_start_torch_memory_measurement()
__snake_case : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : Optional[Any] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , num_inference_steps=2 , generator=a_ , output_type='''np''' , )
__snake_case : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
__snake_case : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
__snake_case : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
__snake_case : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Union[str, Any] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , generator=a_ , num_inference_steps=2 , output_type='''np''' , )
__snake_case : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__snake_case : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__snake_case : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(a_ , a_ )
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
_start_torch_memory_measurement()
__snake_case : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : List[str] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , num_inference_steps=2 , generator=a_ , output_type='''np''' , )
__snake_case : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
__snake_case : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__snake_case : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
__snake_case : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : Any = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Dict = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type='''np''' , )
__snake_case : Optional[int] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__snake_case : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__snake_case : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(a_ , a_ )
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
_start_torch_memory_measurement()
__snake_case : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a_ )
__snake_case : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : Union[str, Any] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , num_inference_steps=2 , generator=a_ , output_type='''np''' , )
__snake_case : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
__snake_case : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__snake_case : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
__snake_case : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Optional[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(a_ )
__snake_case : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(a_ )
__snake_case : Tuple = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type='''np''' , )
__snake_case : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__snake_case : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__snake_case : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(a_ , a_ )
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
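# The IF cascade exercised above, condensed into a sketch (stage checkpoints come
# from the test; the optional x4 upscaler third stage is omitted):
#
# import torch
# from diffusers import IFPipeline, IFSuperResolutionPipeline
#
# stage_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
# stage_1.enable_model_cpu_offload()
# prompt_embeds, negative_embeds = stage_1.encode_prompt("anime turtle")
# image = stage_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images
# stage_2 = IFSuperResolutionPipeline.from_pretrained(
#     "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None
# )
# stage_2.enable_model_cpu_offload()
# image = stage_2(
#     image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pil"
# ).images[0]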
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
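# Translation-oriented usage sketch (standard MBart tokenizer API for this vintage
# of the library; checkpoint id comes from the pretrained maps above):
#
# from transformers import MBartTokenizerFast
#
# tokenizer = MBartTokenizerFast.from_pretrained(
#     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
# )
# batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# # source ids end with [</s>, en_XX], per set_src_lang_special_tokens above
# with tokenizer.as_target_tokenizer():
#     labels = tokenizer("Şeful ONU declară că nu există o soluţie militară în Siria", return_tensors="pt")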
| 24 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a ( SchedulerCommonTest ):
scheduler_classes = (DEISMultistepScheduler,)
forward_default_kwargs = (("""num_inference_steps""", 25),)
def A ( self : Any , **lowercase : Optional[int] ):
'''simple docstring'''
config = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**lowercase )
return config
def A ( self : Union[str, Any] , lowercase : Optional[Any]=0 , **lowercase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config(**lowercase )
UpperCAmelCase = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCAmelCase = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase , UpperCAmelCase = sample, sample
for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : int ):
'''simple docstring'''
pass
def A ( self : str , lowercase : Any=0 , **lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCAmelCase = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : Any , lowercase : List[str]=None , **lowercase : List[Any] ):
'''simple docstring'''
if scheduler is None:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**lowercase )
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**lowercase )
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = 10
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = model(lowercase , lowercase )
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
return sample
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , '''set_timesteps''' ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , '''set_timesteps''' ):
UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
UpperCAmelCase = scheduler.timesteps[5]
UpperCAmelCase = scheduler.timesteps[6]
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
UpperCAmelCase = self.full_loop(scheduler=lowercase )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase = self.full_loop(scheduler=lowercase )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
def A ( self : Dict ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A ( self : int ):
'''simple docstring'''
self.check_over_configs(thresholding=lowercase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , algorithm_type='''deis''' , solver_order=lowercase , solver_type=lowercase , )
def A ( self : Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def A ( self : Tuple ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , )
UpperCAmelCase = self.full_loop(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , )
assert not torch.isnan(lowercase ).any(), "Samples have nan numbers"
def A ( self : int ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowercase )
self.check_over_configs(lower_order_final=lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowercase , time_step=0 )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.full_loop()
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 )
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = 10
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = model(lowercase , lowercase )
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
assert sample.dtype == torch.floataa
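# Minimal usage sketch (not part of the test file) of the config round-trip the
# tests above exercise; only API calls that the tests themselves use appear here.
with tempfile.TemporaryDirectory() as _tmp:
    _sched = DEISMultistepScheduler(num_train_timesteps=1_000, solver_order=2)
    _sched.save_config(_tmp)  # writes the scheduler config json into _tmp
    _reloaded = DEISMultistepScheduler.from_pretrained(_tmp)
    assert _reloaded.config.solver_order == 2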
| 34 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
__A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems ( config , items ):
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def pytest_configure ( config ):
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=True )
def set_test_cache_config ( tmp_path_factory , monkeypatch ):
'''simple docstring'''
test_hf_cache_home = tmp_path_factory.getbasetemp() / '''cache'''
test_hf_datasets_cache = test_hf_cache_home / '''datasets'''
test_hf_metrics_cache = test_hf_cache_home / '''metrics'''
test_hf_modules_cache = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(test_hf_datasets_cache ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(test_hf_metrics_cache ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(test_hf_modules_cache ) )
test_downloaded_datasets_path = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(test_downloaded_datasets_path ) )
test_extracted_datasets_path = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='''session''' )
def disable_tqdm_output ( ):
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false ( monkeypatch ):
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning ( monkeypatch ):
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , True )
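# Hypothetical illustration (not in the original conftest) of how the
# collection hook above interacts with markers: unmarked tests get
# `pytest.mark.unit` added, so `pytest -m unit` selects them, while tests
# that already carry an `integration` or `unit` marker are left untouched.
#
# @pytest.mark.integration
# def test_full_pipeline(): ...   # keeps its marker; the hook skips it
#
# def test_helper(): ...          # unmarked, so the hook adds pytest.mark.unit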
| 260 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowercase_ ( PretrainedConfig ):
'''simple docstring'''
model_type = '''fnet'''
def __init__( self , vocab_size=32_000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
self.tpu_short_seq_length = tpu_short_seq_length
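# Minimal usage sketch (not in the original file); `lowercase_` is the config
# class defined above, which mirrors transformers' FNetConfig.
_demo_config = lowercase_(vocab_size=1_000, hidden_size=64, num_hidden_layers=2)
assert _demo_config.model_type == "fnet" and _demo_config.hidden_act == "gelu_new"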
| 26 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name ( class_name :str ) -> Any:
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
module_name = model_type_to_module_name(module_name )
module = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(module , class_name )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(extractor , '''__name__''' , None ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
main_module = importlib.import_module('''transformers''' )
if hasattr(main_module , class_name ):
return getattr(main_module , class_name )
return None
def get_feature_extractor_config ( pretrained_model_name_or_path :Union[str, os.PathLike] , cache_dir :Optional[Union[str, os.PathLike]] = None , force_download :bool = False , resume_download :bool = False , proxies :Optional[Dict[str, str]] = None , use_auth_token :Optional[Union[bool, str]] = None , revision :Optional[str] = None , local_files_only :bool = False , **kwargs :int , ) -> Tuple:
resolved_config_file = get_file_from_repo(
pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(resolved_config_file , encoding='''utf-8''' ) as reader:
return json.load(reader )
class lowercase_ :
'''simple docstring'''
def __init__( self : Tuple ) ->int:
"""simple docstring"""
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
def __lowerCAmelCase ( cls : int , __UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Dict ) ->List[Any]:
"""simple docstring"""
a = kwargs.pop('''config''' , __UpperCAmelCase )
a = kwargs.pop('''trust_remote_code''' , __UpperCAmelCase )
a = True
a , a = FeatureExtractionMixin.get_feature_extractor_dict(__UpperCAmelCase , **__UpperCAmelCase )
a = config_dict.get('''feature_extractor_type''' , __UpperCAmelCase )
a = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
a = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
a = AutoConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
# It could be in `config.feature_extractor_type``
a = getattr(__UpperCAmelCase , '''feature_extractor_type''' , __UpperCAmelCase )
if hasattr(__UpperCAmelCase , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
a = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
a = feature_extractor_class_from_name(__UpperCAmelCase )
a = feature_extractor_auto_map is not None
a = feature_extractor_class is not None or type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING
a = resolve_trust_remote_code(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if has_remote_code and trust_remote_code:
a = get_class_from_dynamic_module(
__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
a = kwargs.pop('''code_revision''' , __UpperCAmelCase )
if os.path.isdir(__UpperCAmelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING:
a = FEATURE_EXTRACTOR_MAPPING[type(__UpperCAmelCase )]
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def register ( config_class , feature_extractor_class ):
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
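# Usage sketch (not in the original file): the `lowercase_` class above plays
# the role of transformers' AutoFeatureExtractor, so typical calls look like
#   extractor = lowercase_.from_pretrained("facebook/wav2vec2-base-960h")  # model id illustrative
#   lowercase_.register(MyConfig, MyFeatureExtractor)  # MyConfig / MyFeatureExtractor are hypothetical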
| 26 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester :
def __init__( self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.act_dim = act_dim
self.state_dim = state_dim
self.hidden_size = hidden_size
self.max_length = max_length
self.is_training = is_training
def prepare_config_and_inputs ( self ):
"""simple docstring"""
states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def get_config ( self ):
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def create_and_check_model ( self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
"""simple docstring"""
model = DecisionTransformerModel(config=config )
model.to(torch_device )
model.eval()
result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
def prepare_config_and_inputs_for_common ( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
) = config_and_inputs
inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class a_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
all_generative_model_classes = ()
pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
test_generate_without_input_ids = False
# Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_attention_outputs = False
test_hidden_states_output = False
test_inputs_embeds = False
test_model_common_attributes = False
test_gradient_checkpointing = False
test_torchscript = False
def setUp ( self ):
"""simple docstring"""
self.model_tester = DecisionTransformerModelTester(self )
self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
def test_config ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model ( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def test_model_from_pretrained ( self ):
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DecisionTransformerModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def test_forward_signature ( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class a_ ( unittest.TestCase ):
@slow
def test_autoregressive_prediction ( self ):
"""simple docstring"""
NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform
TARGET_RETURN = 10 # defined by the RL environment, may be normalized
model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
model = model.to(torch_device )
config = model.config
torch.manual_seed(0 )
state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ) # env.reset()
expected_outputs = torch.tensor(
[[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=torch_device )
returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
states = state
actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
for step in range(NUM_STEPS ):
actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
state_pred , action_pred , return_pred = model(
states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
state , reward , done , info = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
1.0,
False,
{},
)
actions[-1] = action_pred[0, -1]
states = torch.cat([states, state] , dim=1 )
pred_return = returns_to_go[0, -1] - reward
returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
timesteps = torch.cat(
[timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
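# Note on the integration test above: it follows the standard decision-transformer
# rollout. Each step appends a zero action/reward placeholder, predicts the next
# action from (returns-to-go, states, actions, timesteps), then appends the new
# state and the decremented return-to-go before advancing the timestep.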
| 223 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_a = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class __A ( PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
padding_side = """left"""
slow_tokenizer_class = XLNetTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
'''simple docstring'''
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
super().__init__(
vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
self._pad_token_type_id = 3
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def build_inputs_with_special_tokens ( self , token_ids_a , token_ids_b = None ):
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_b + sep + cls
def create_token_type_ids_from_sequences ( self , token_ids_a , token_ids_b = None ):
'''simple docstring'''
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_b is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + cls_segment_id
def save_vocabulary ( self , save_directory , filename_prefix = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(save_directory ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
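# Illustrative sketch (not from the original file) of the segment-id layout the
# methods above implement: XLNet appends its special tokens at the END of the
# sequence, and the trailing cls token gets segment id 2.
def _sketch_token_type_ids(len_a, len_b=None):
    if len_b is None:
        return (len_a + 1) * [0] + [2]          # tokens_a + sep -> 0s, cls -> 2
    return (len_a + 1) * [0] + (len_b + 1) * [1] + [2]

assert _sketch_token_type_ids(3) == [0, 0, 0, 0, 2]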
| 209 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
def tearDown ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
def test_stable_diffusion_inpaint_pipeline ( self ):
"""simple docstring"""
lowercase__ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ : int = '''xvjiarui/stable-diffusion-2-inpainting'''
lowercase__ : Optional[Any] = FlaxStableDiffusionInpaintPipeline.from_pretrained(_snake_case ,safety_checker=_snake_case )
lowercase__ : int = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ : Tuple = jax.random.PRNGKey(0 )
lowercase__ : Optional[Any] = 50
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = num_samples * [prompt]
lowercase__ : int = num_samples * [init_image]
lowercase__ : List[str] = num_samples * [mask_image]
lowercase__ : Optional[Any] = pipeline.prepare_inputs(_snake_case ,_snake_case ,_snake_case )
# shard inputs and rng
lowercase__ : Any = replicate(_snake_case )
lowercase__ : Tuple = jax.random.split(_snake_case ,jax.device_count() )
lowercase__ : str = shard(_snake_case )
lowercase__ : Tuple = shard(_snake_case )
lowercase__ : Union[str, Any] = shard(_snake_case )
lowercase__ : Optional[Any] = pipeline(
_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,jit=_snake_case )
lowercase__ : Union[str, Any] = output.images.reshape(_snake_case ,512 ,512 ,3 )
lowercase__ : Any = images[0, 253:256, 253:256, -1]
lowercase__ : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : List[Any] = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
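# Note on the test above: replicate/shard is the standard Flax data-parallel
# recipe. `replicate` copies the pipeline params to every device, `shard` adds
# a leading device axis to each input batch, and
# `jax.random.split(rng, jax.device_count())` hands every device its own PRNG
# key for the jit-compiled, pmapped run.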
| 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
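# Note on the pattern above (not in the original file): `_LazyModule` defers the
# heavy framework imports until a symbol is first accessed, so
#   import transformers.models.roberta            # cheap: only builds the stub
#   transformers.models.roberta.RobertaConfig     # triggers the real import
# while the `TYPE_CHECKING` branch keeps static analyzers aware of the full API.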
| 302 | 0 |
from math import factorial
def solution ( n = 100 ) -> int:
return sum(map(int , str(factorial(n ) ) ) )
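# Worked example (not in the original): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27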
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 50 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def test_small_integration_test ( self ):
"""simple docstring"""
model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
labels = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
loss = model(input_ids , labels=labels ).loss
mtf_score = -tf.math.reduce_mean(loss ).numpy()
EXPECTED_SCORE = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 76 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A ( SchedulerCommonTest ):
scheduler_classes = (UnCLIPScheduler,)
def _A (self , **lowerCAmelCase ):
__lowercase= {
'num_train_timesteps': 1_0_0_0,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**_lowercase )
return config
def _A (self ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _A (self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowercase )
def _A (self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _A (self ):
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=_lowercase )
def _A (self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowercase )
def _A (self ):
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowercase , prev_timestep=_lowercase )
def _A (self ):
__lowercase= self.scheduler_classes[0]
__lowercase= self.get_scheduler_config(variance_type='fixed_small_log' )
__lowercase= scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_99_49_87 ) ) < 1E-5
def _A (self ):
__lowercase= self.scheduler_classes[0]
__lowercase= self.get_scheduler_config(variance_type='learned_range' )
__lowercase= scheduler_class(**_lowercase )
__lowercase= 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(4_8_7 , predicted_variance=_lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(9_9_9 , predicted_variance=_lowercase ) - -0.0_01_00_11 < 1E-5
def _A (self ):
__lowercase= self.scheduler_classes[0]
__lowercase= self.get_scheduler_config()
__lowercase= scheduler_class(**_lowercase )
__lowercase= scheduler.timesteps
__lowercase= self.dummy_model()
__lowercase= self.dummy_sample_deter
__lowercase= torch.manual_seed(0 )
for i, t in enumerate(_lowercase ):
# 1. predict noise residual
__lowercase= model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
__lowercase= scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
__lowercase= pred_prev_sample
__lowercase= torch.sum(torch.abs(_lowercase ) )
__lowercase= torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def _A (self ):
__lowercase= self.scheduler_classes[0]
__lowercase= self.get_scheduler_config()
__lowercase= scheduler_class(**_lowercase )
scheduler.set_timesteps(2_5 )
__lowercase= scheduler.timesteps
__lowercase= self.dummy_model()
__lowercase= self.dummy_sample_deter
__lowercase= torch.manual_seed(0 )
for i, t in enumerate(_lowercase ):
# 1. predict noise residual
__lowercase= model(_lowercase , _lowercase )
if i + 1 == timesteps.shape[0]:
__lowercase= None
else:
__lowercase= timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase= scheduler.step(
_lowercase , _lowercase , _lowercase , prev_timestep=_lowercase , generator=_lowercase ).prev_sample
__lowercase= pred_prev_sample
__lowercase= torch.sum(torch.abs(_lowercase ) )
__lowercase= torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def _A (self ):
pass
def _A (self ):
pass
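# Note on the variance tests above: with "fixed_small_log" the scheduler uses a
# clamped log of the posterior variance, while "learned_range" interpolates
# between the min and max log-variance with frac = (predicted_variance + 1) / 2,
# which is why those assertions pass a `predicted_variance` argument.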
| 370 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester ( ConfigTester ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class CvtModelTester :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class CvtModelIntegrationTest ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
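# Note on the shape assertions above: the per-stage spatial sizes follow the
# usual conv output formula floor((size + 2*pad - kernel)/stride + 1). For the
# tester defaults at stage 0 (64x64 input, kernel 7, stride 4, padding 2):
# floor((64 + 4 - 7)/4 + 1) = 16, matching the image_size // 4 check.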
| 304 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
__snake_case = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(a__ ) , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a__ ) , x.transpose() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
__snake_case = torch.tensor(a__ )
self.assertTrue(np.allclose(transpose(a__ ) , transpose(a__ ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = torch.tensor(a__ )
self.assertTrue(np.allclose(transpose(a__ , axes=(1, 2, 0) ) , transpose(a__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def a (self : Any ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
__snake_case = tf.constant(a__ )
self.assertTrue(np.allclose(transpose(a__ ) , transpose(a__ ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = tf.constant(a__ )
self.assertTrue(np.allclose(transpose(a__ , axes=(1, 2, 0) ) , transpose(a__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def a (self : Any ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
__snake_case = jnp.array(a__ )
self.assertTrue(np.allclose(transpose(a__ ) , np.asarray(transpose(a__ ) ) ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = jnp.array(a__ )
self.assertTrue(np.allclose(transpose(a__ , axes=(1, 2, 0) ) , np.asarray(transpose(a__ , axes=(1, 2, 0) ) ) ) )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a__ , (4, 3) ) , np.reshape(a__ , (4, 3) ) ) )
__snake_case = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a__ , (12, 5) ) , np.reshape(a__ , (12, 5) ) ) )
@require_torch
def a (self : Dict ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
__snake_case = torch.tensor(a__ )
self.assertTrue(np.allclose(reshape(a__ , (4, 3) ) , reshape(a__ , (4, 3) ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = torch.tensor(a__ )
self.assertTrue(np.allclose(reshape(a__ , (12, 5) ) , reshape(a__ , (12, 5) ).numpy() ) )
@require_tf
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
__snake_case = tf.constant(a__ )
self.assertTrue(np.allclose(reshape(a__ , (4, 3) ) , reshape(a__ , (4, 3) ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = tf.constant(a__ )
self.assertTrue(np.allclose(reshape(a__ , (12, 5) ) , reshape(a__ , (12, 5) ).numpy() ) )
@require_flax
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
__snake_case = jnp.array(a__ )
self.assertTrue(np.allclose(reshape(a__ , (4, 3) ) , np.asarray(reshape(a__ , (4, 3) ) ) ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = jnp.array(a__ )
self.assertTrue(np.allclose(reshape(a__ , (12, 5) ) , np.asarray(reshape(a__ , (12, 5) ) ) ) )
def a (self : Any ):
"""simple docstring"""
__snake_case = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a__ ) , np.squeeze(a__ ) ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a__ , axis=2 ) , np.squeeze(a__ , axis=2 ) ) )
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = np.random.randn(1 , 3 , 4 )
__snake_case = torch.tensor(a__ )
self.assertTrue(np.allclose(squeeze(a__ ) , squeeze(a__ ).numpy() ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
__snake_case = torch.tensor(a__ )
self.assertTrue(np.allclose(squeeze(a__ , axis=2 ) , squeeze(a__ , axis=2 ).numpy() ) )
@require_tf
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = np.random.randn(1 , 3 , 4 )
__snake_case = tf.constant(a__ )
self.assertTrue(np.allclose(squeeze(a__ ) , squeeze(a__ ).numpy() ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
__snake_case = tf.constant(a__ )
self.assertTrue(np.allclose(squeeze(a__ , axis=2 ) , squeeze(a__ , axis=2 ).numpy() ) )
@require_flax
def a (self : List[str] ):
"""simple docstring"""
__snake_case = np.random.randn(1 , 3 , 4 )
__snake_case = jnp.array(a__ )
self.assertTrue(np.allclose(squeeze(a__ ) , np.asarray(squeeze(a__ ) ) ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
__snake_case = jnp.array(a__ )
self.assertTrue(np.allclose(squeeze(a__ , axis=2 ) , np.asarray(squeeze(a__ , axis=2 ) ) ) )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a__ , axis=1 ) , np.expand_dims(a__ , axis=1 ) ) )
@require_torch
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
__snake_case = torch.tensor(a__ )
self.assertTrue(np.allclose(expand_dims(a__ , axis=1 ) , expand_dims(a__ , axis=1 ).numpy() ) )
@require_tf
def a (self : Any ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
__snake_case = tf.constant(a__ )
self.assertTrue(np.allclose(expand_dims(a__ , axis=1 ) , expand_dims(a__ , axis=1 ).numpy() ) )
@require_flax
def a (self : int ):
"""simple docstring"""
__snake_case = np.random.randn(3 , 4 )
__snake_case = jnp.array(a__ )
self.assertTrue(np.allclose(expand_dims(a__ , axis=1 ) , np.asarray(expand_dims(a__ , axis=1 ) ) ) )
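    # Added sketch (not part of the original test suite): `transpose`, `reshape`,
    # `squeeze` and `expand_dims` are assumed to be the framework-agnostic helpers
    # imported earlier in this file; they dispatch on the input type, so one call
    # site covers NumPy arrays and framework tensors alike.
    def test_squeeze_expand_dims_roundtrip(self):
        x = np.random.randn(1, 3, 4)
        # dropping the singleton axis and re-adding it should be a no-op
        y = expand_dims(squeeze(x), axis=0)
        self.assertTrue(np.allclose(x, y))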
| 24 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """We follow the standard three MNLI labels, even though HANS itself has only two."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    """Converts a list of ``InputExample`` into ``InputFeatures`` usable by a model."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
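# Minimal usage sketch (an assumption, not part of the original module): it
# presumes a local `hans/` checkout containing `heuristics_train_set.txt`
# and a torch install, and uses the classes defined above.
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_dataset = HansDataset(
#       data_dir="hans", tokenizer=tokenizer, task="hans", max_seq_length=128
#   )
#   print(len(train_dataset), train_dataset.get_labels())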
| 24 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352 |
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element (1-indexed) of ``lst`` using randomized
    quickselect. Note: assumes the elements of ``lst`` are distinct.

    >>> kth_number([2, 1, 3, 4, 5], 3)
    3
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
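    # Worked example (added for illustration): the 3rd-smallest element of an
    # unsorted list; expected linear time, quadratic in the worst case.
    print(kth_number([2, 1, 3, 4, 5], k=3))  # -> 3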
| 189 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a tokenized code snippet, or None if it is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric boundaries."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset: compute MinHashes in parallel, then
    query/insert them into a MinHashLSH index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity of the token sets of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset, keeping one 'extreme' per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
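# Minimal usage sketch (an assumption, not part of the original script): any
# `datasets.Dataset` with "content", "repo_name" and "path" columns works,
# e.g. a small slice of a code corpus.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)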
| 26 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, List) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 26 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : Optional[int] = self.get_tokenizer()
snake_case_ : Optional[int] = self.get_feature_extractor()
snake_case_ : List[Any] = self.get_decoder()
snake_case_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _SCREAMING_SNAKE_CASE )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : int = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
snake_case_ : List[str] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Optional[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , "include" ):
WavaVecaProcessorWithLM(
tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : Tuple = self.get_feature_extractor()
snake_case_ : List[Any] = self.get_tokenizer()
snake_case_ : str = self.get_decoder()
snake_case_ : int = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
snake_case_ : int = floats_list((3, 1000) )
snake_case_ : Union[str, Any] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np" )
snake_case_ : List[str] = processor(_SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : Optional[Any] = self.get_feature_extractor()
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : Dict = self.get_decoder()
snake_case_ : Dict = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
snake_case_ : int = "This is a test string"
snake_case_ : Dict = processor(text=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[Any] = self.get_feature_extractor()
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : Dict = self.get_decoder()
snake_case_ : int = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
snake_case_ : str = self._get_dummy_logits(shape=(10, 16) , seed=13 )
snake_case_ : Tuple = processor.decode(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = decoder.decode_beams(_SCREAMING_SNAKE_CASE )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ : Tuple = self.get_feature_extractor()
snake_case_ : Any = self.get_tokenizer()
snake_case_ : List[str] = self.get_decoder()
snake_case_ : str = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
snake_case_ : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
snake_case_ : Any = processor.batch_decode(_SCREAMING_SNAKE_CASE )
else:
with get_context(_SCREAMING_SNAKE_CASE ).Pool() as pool:
snake_case_ : Union[str, Any] = processor.batch_decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = list(_SCREAMING_SNAKE_CASE )
with get_context("fork" ).Pool() as p:
snake_case_ : List[Any] = decoder.decode_beams_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ , snake_case_ : Dict = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_SCREAMING_SNAKE_CASE , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(_SCREAMING_SNAKE_CASE , decoded_processor.logit_score )
self.assertListEqual(_SCREAMING_SNAKE_CASE , decoded_processor.lm_score )
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : int = self.get_feature_extractor()
snake_case_ : Any = self.get_tokenizer()
snake_case_ : List[Any] = self.get_decoder()
snake_case_ : str = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = self._get_dummy_logits()
snake_case_ : int = 15
snake_case_ : Optional[int] = -20.0
snake_case_ : Any = -4.0
snake_case_ : Optional[int] = processor.batch_decode(
_SCREAMING_SNAKE_CASE , beam_width=_SCREAMING_SNAKE_CASE , beam_prune_logp=_SCREAMING_SNAKE_CASE , token_min_logp=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = decoded_processor_out.text
snake_case_ : Tuple = list(_SCREAMING_SNAKE_CASE )
with get_context("fork" ).Pool() as pool:
snake_case_ : Any = decoder.decode_beams_batch(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , beam_width=_SCREAMING_SNAKE_CASE , beam_prune_logp=_SCREAMING_SNAKE_CASE , token_min_logp=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
snake_case_ : List[str] = [d[0][2] for d in decoded_decoder_out]
snake_case_ : Dict = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , _SCREAMING_SNAKE_CASE )
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : int = self.get_feature_extractor()
snake_case_ : int = self.get_tokenizer()
snake_case_ : Any = self.get_decoder()
snake_case_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = self._get_dummy_logits()
snake_case_ : int = 2.0
snake_case_ : Optional[int] = 5.0
snake_case_ : Dict = -20.0
snake_case_ : Dict = True
snake_case_ : Union[str, Any] = processor.batch_decode(
_SCREAMING_SNAKE_CASE , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , unk_score_offset=_SCREAMING_SNAKE_CASE , lm_score_boundary=_SCREAMING_SNAKE_CASE , )
snake_case_ : Dict = decoded_processor_out.text
snake_case_ : int = list(_SCREAMING_SNAKE_CASE )
decoder.reset_params(
alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , unk_score_offset=_SCREAMING_SNAKE_CASE , lm_score_boundary=_SCREAMING_SNAKE_CASE , )
with get_context("fork" ).Pool() as pool:
snake_case_ : str = decoder.decode_beams_batch(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
snake_case_ : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , _SCREAMING_SNAKE_CASE )
snake_case_ : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : str = processor.decoder.model_container[processor.decoder._model_key]
snake_case_ : Union[str, Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
snake_case_ : int = os.listdir(_SCREAMING_SNAKE_CASE )
snake_case_ : str = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Union[str, Any] = snapshot_download("hf-internal-testing/processor_with_lm" )
snake_case_ : int = WavaVecaProcessorWithLM.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
snake_case_ : int = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
snake_case_ : Any = os.listdir(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = os.listdir(_SCREAMING_SNAKE_CASE )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : List[str] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : int = floats_list((3, 1000) )
snake_case_ : List[str] = processor_wavaveca(_SCREAMING_SNAKE_CASE , return_tensors="np" )
snake_case_ : Optional[int] = processor_auto(_SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
snake_case_ : Optional[int] = self._get_dummy_logits()
snake_case_ : Optional[int] = processor_wavaveca.batch_decode(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = processor_auto.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : int = self.get_feature_extractor()
snake_case_ : Optional[int] = self.get_tokenizer()
snake_case_ : List[Any] = self.get_decoder()
snake_case_ : str = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : Dict = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : Optional[Any] = self._get_dummy_logits()[0]
snake_case_ : int = processor.decode(_SCREAMING_SNAKE_CASE , output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : Dict = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : List[str] = self._get_dummy_logits()
snake_case_ : Optional[Any] = processor.batch_decode(_SCREAMING_SNAKE_CASE , output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(_SCREAMING_SNAKE_CASE , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _lowerCAmelCase ( self ) -> List[str]:
import torch
snake_case_ : str = load_dataset("common_voice" , "en" , split="train" , streaming=_SCREAMING_SNAKE_CASE )
snake_case_ : int = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6000 ) )
snake_case_ : Dict = iter(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = next(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
snake_case_ : int = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
snake_case_ : List[str] = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
snake_case_ : int = model(_SCREAMING_SNAKE_CASE ).logits.cpu().numpy()
snake_case_ : Optional[Any] = processor.decode(logits[0] , output_word_offsets=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
snake_case_ : List[str] = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
snake_case_ : int = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(_SCREAMING_SNAKE_CASE , "word" ) ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(" ".join(self.get_from_offsets(_SCREAMING_SNAKE_CASE , "word" ) ) , output.text )
# output times
snake_case_ : int = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE , "start_time" ) )
snake_case_ : Optional[Any] = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE , "end_time" ) )
# fmt: off
snake_case_ : int = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
snake_case_ : List[str] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=0.01 ) )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=0.01 ) )
| 36 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculate the speed of sound in a fluid from its density and bulk modulus:
    v = sqrt(K / rho), with K the bulk modulus and rho the density.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
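    # Worked example (added for illustration): for water, K ≈ 2.2e9 Pa and
    # rho ≈ 1000 kg/m^3, giving roughly 1483 m/s.
    print(f"{speed_of_sound_in_a_fluid(density=1000, bulk_modulus=2.2e9):.0f} m/s")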
| 36 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
def _lowerCamelCase ( self) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(__lowercase , "vocab.txt"))
_A : int = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="bert" , use_fast=__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(__lowercase , "vocab.json"))
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(__lowercase , "merges.txt"))
_A : Dict = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="gpt2" , use_fast=__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
@require_tokenizers
def _lowerCamelCase ( self) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(__lowercase , "vocab.txt"))
_A : Tuple = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="bert")
self.assertIsInstance(__lowercase , __lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(__lowercase , "vocab.json"))
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(__lowercase , "merges.txt"))
_A : List[str] = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="gpt2")
self.assertIsInstance(__lowercase , __lowercase)
def _lowerCamelCase ( self) -> Optional[Any]:
with pytest.raises(__lowercase):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx")
@require_tokenizers
def _lowerCamelCase ( self) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_A : int = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast))
if isinstance(__lowercase , __lowercase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowercase)
else:
self.assertEqual(tokenizer.do_lower_case , __lowercase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
@require_tokenizers
def _lowerCamelCase ( self) -> Union[str, Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowercase , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
_A : Union[str, Any] = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : List[Any] = TOKENIZER_MAPPING.values()
_A : Optional[int] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowercase)
@require_tokenizers
def _lowerCamelCase ( self) -> List[str]:
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=__lowercase) , __lowercase)
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased") , __lowercase)
@require_tokenizers
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=__lowercase)
_A : Any = "Hello, world. How are you?"
_A : Tuple = tokenizer.tokenize(__lowercase)
self.assertEqual("[UNK]" , tokens[0])
_A : Any = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=__lowercase)
_A : int = tokenizer.tokenize(__lowercase)
self.assertEqual("[UNK]" , tokens[0])
@require_tokenizers
def _lowerCamelCase ( self) -> Optional[int]:
_A : List[str] = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
self.assertEqual(type(__lowercase) , __lowercase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0)
self.assertEqual(tokenizer.unk_token , "[UNK]")
self.assertEqual(tokenizer.padding_side , "right")
self.assertEqual(tokenizer.truncation_side , "right")
def _lowerCamelCase ( self) -> str:
_A : List[str] = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : Optional[int] = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 1_2)
def _lowerCamelCase ( self) -> str:
_A : str = AutoTokenizer.from_pretrained("ctrl")
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowercase , __lowercase)
def _lowerCamelCase ( self) -> Tuple:
_A : List[Any] = get_tokenizer_config("bert-base-cased")
_A : Optional[int] = config.pop("_commit_hash" , __lowercase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowercase , {"do_lower_case": False})
# This model does not have a tokenizer_config so we get back an empty dict.
_A : List[str] = get_tokenizer_config(__lowercase)
self.assertDictEqual(__lowercase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_A : List[Any] = AutoTokenizer.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : Optional[int] = get_tokenizer_config(__lowercase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer")
def _lowerCamelCase ( self) -> Dict:
try:
AutoConfig.register("custom" , __lowercase)
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase):
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
_A : List[Any] = CustomTokenizer.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : Tuple = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _lowerCamelCase ( self) -> Union[str, Any]:
try:
AutoConfig.register("custom" , __lowercase)
# Can register in two steps
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowercase , slow_tokenizer_class=__lowercase , fast_tokenizer_class=__lowercase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase):
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase)
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_A : int = BertTokenizerFast.from_pretrained(__lowercase)
bert_tokenizer.save_pretrained(__lowercase)
_A : List[Any] = CustomTokenizerFast.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : str = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
_A : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self) -> Dict:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase):
_A : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase):
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase)
_A : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : int = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast")
# Test we can also load the slow version
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : str = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer")
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer")
@require_tokenizers
def _lowerCamelCase ( self) -> Dict:
class lowerCAmelCase__ ( lowerCamelCase__):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = False
class lowerCAmelCase__ ( lowerCamelCase__):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = NewTokenizer
__SCREAMING_SNAKE_CASE = False
try:
AutoConfig.register("custom" , __lowercase)
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase)
# If remote code is not set, the default is to use local
_A : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertFalse(tokenizer.special_attribute_present)
_A : int = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
_A : Dict = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertFalse(tokenizer.special_attribute_present)
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
_A : Tuple = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertTrue(tokenizer.special_attribute_present)
_A : List[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=True)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
# Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=True , use_fast=False)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier"):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" , revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have a cached version of the tokenizer to start from.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
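

# ------------------------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite): the registration flow exercised by
# the tests above, assuming `transformers` is installed. `MyConfig` and `MyTokenizer`
# are illustrative placeholders, not symbols taken from the tests.
if __name__ == "__main__":
    from transformers import AutoConfig, AutoTokenizer, PretrainedConfig
    from transformers.tokenization_utils import PreTrainedTokenizer

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyTokenizer(PreTrainedTokenizer):
        pass

    # After registration, AutoTokenizer resolves MyTokenizer for MyConfig-typed checkpoints.
    AutoConfig.register("my-model", MyConfig)
    AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)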
| 11 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False)
            if isinstance(mask_token , str)
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
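

# Hedged usage sketch (not part of the library file): loading the fast tokenizer from a
# public checkpoint listed in the map above and round-tripping a sentence; assumes Hub access.
if __name__ == "__main__":
    tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    encoded = tokenizer("The quick brown fox.")
    # input_ids start with [CLS] and end with [SEP], per build_inputs_with_special_tokens.
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))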
| 302 | 0 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x , collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self , vision_config , text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self , a: np.ndarray , b: np.ndarray , tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff , tol , f'Difference between torch and flax is {diff} (>= {tol}).')

    def check_model_from_pretrained_configs(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff , 1e-3)
    def check_vision_text_output_attention(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions) , vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions) , text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence(self , pt_model , fx_model , inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs) , len(pt_outputs) , 'Output lengths differ between Flax and PyTorch')
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True)
            fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
            self.assertEqual(len(fx_outputs_loaded) , len(pt_outputs) , 'Output lengths differ between Flax and PyTorch')
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
                self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True)
            pt_model_loaded.to(torch_device)
            pt_model_loaded.eval()
            with torch.no_grad():
                pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
            self.assertEqual(len(fx_outputs) , len(pt_outputs_loaded) , 'Output lengths differ between Flax and PyTorch')
            for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
                self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4e-2)
    def check_equivalence_pt_to_flax(self , vision_config , text_config , inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict)

    def check_equivalence_flax_to_pt(self , vision_config , text_config , inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params)
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('vision_config')
        text_config = config_inputs_dict.pop('text_config')
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff , 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin , unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model(self , vision_config , text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin , unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self , vision_config , text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=image , padding=True , return_tensors='np')
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1e-3))
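

# Hedged usage sketch (not part of the test suite): composing a dual encoder from two
# tiny checkpoints the way the tests above do; assumes Flax and Hub access, and that the
# vision/text weights need conversion from PyTorch (hence the *_from_pt flags).
if __name__ == "__main__":
    model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
        "hf-internal-testing/tiny-random-vit",
        "hf-internal-testing/tiny-bert",
        vision_from_pt=True,
        text_from_pt=True,
    )
    print(model.config.projection_dim)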
| 355 |
"""simple docstring"""
def kinetic_energy(mass: float , velocity: float) -> float:
    """
    Calculate the kinetic energy of a body of given mass moving at a given velocity.
    """
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
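    # Worked examples (hedged additions, following directly from 0.5 * m * |v|^2):
    assert kinetic_energy(10, 10) == 500.0  # 0.5 * 10 * 100
    assert kinetic_energy(2, -3) == 9.0  # 0.5 * 2 * 9; the sign of the velocity is irrelevant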
| 40 | 0 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
lowerCamelCase_ , lowerCamelCase_ = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
lowerCamelCase_ = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
lowerCamelCase_ = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCamelCase_ = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 79 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , 'embed_dim'))
        self.parent.assertTrue(hasattr(config , 'num_heads'))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , pixel_values , labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self , config , pixel_values , labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason='Cvt does not output attentions')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states) , expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
| 304 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class FocalNetConfig(BackboneConfigMixin , PretrainedConfig):
    model_type = 'focalnet'

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1 , len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
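

# Hedged usage sketch (not part of the library file): instantiating the config with a
# couple of overrides; mirrors the standard PretrainedConfig workflow.
if __name__ == "__main__":
    config = FocalNetConfig(embed_dim=64, depths=[2, 2, 2, 2])
    print(config.model_type, config.stage_names)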
| 367 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, select the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
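    # Hedged follow-up (not in the original script): a quick error metric in the scaled
    # space, using `pred` and `y_test` from above.
    rmse = float(np.sqrt(np.mean((pred - y_test) ** 2)))
    print(f"test RMSE (scaled space): {rmse:.4f}")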
| 49 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
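

# Hedged usage sketch (not part of the library file): a minimal concrete reader built on
# AbstractDatasetReader; only `Dataset.from_dict` (imported above) is assumed here.
class InMemoryListReader(AbstractDatasetReader):
    """Toy reader that wraps a list of strings as a one-column Dataset."""

    def read(self):
        return Dataset.from_dict({"text": list(self.path_or_paths)})


# e.g. InMemoryListReader(["hello", "world"]).read() yields a 2-row Dataset.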
| 110 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size , param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False)
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs)

    def flip_channel_order(
        self , image: np.ndarray , data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image , data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors)
    def post_process_semantic_segmentation(self , outputs , target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 189 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Object detection pipeline: predicts bounding boxes and their classes for the objects in an image."""

    def __init__(self , *args , **kwargs):
        super().__init__(*args , **kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , 'vision')
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
    def _sanitize_parameters(self , **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs
    def __call__(self , *args , **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args , **kwargs)
    def preprocess(self , image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image] , return_tensors='pt')
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt')
        inputs['target_size'] = target_size
        return inputs
    def _forward(self , model_inputs):
        target_size = model_inputs.pop('target_size')
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({'target_size': target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs
    def postprocess(self , model_outputs , threshold=0.9):
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]))

            scores, classes = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs['bbox'].squeeze(0)]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys , vals)) for vals in zip(scores.tolist() , labels , boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys , vals))
                for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'])
            ]
        return annotation
    def _get_bounding_box(self , box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
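

# Hedged usage sketch (not part of the library file): the public entry point for this
# pipeline; assumes Hub access, and the DETR checkpoint is just one valid choice.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
    # Each prediction is a dict with "score", "label" and "box" keys (see postprocess above).
    print(predictions[:2])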
| 357 |
class PrefixSum:
    def __init__(self , array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self , start: int , end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self , target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
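    # Worked example (hedged addition): prefix sums of [1, 2, 3] are [1, 3, 6], so the
    # range sum over [0, 2] is 6, and a contiguous subarray summing to 5 exists ([2, 3]).
    ps = PrefixSum([1, 2, 3])
    assert ps.get_sum(0, 2) == 6
    assert ps.contains_sum(5) is True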
| 83 | 0 |
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
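
# Hedged usage sketch (an assumption about how this table is consumed elsewhere in the
# package): dependencies are looked up by pip name to build requirement strings.
if __name__ == "__main__":
    print(deps["torch"])  # e.g. "torch>=1.4"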
| 36 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(_lowerCamelCase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs , expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs , max_num_jobs , expected):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs , expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 36 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
class __lowercase ( A, A ):
'''simple docstring'''
_A : Any = '''maskformer-swin'''
_A : Union[str, Any] = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
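# Quick standalone check of the two derived attributes above with the default
# hyperparameters (plain arithmetic, nothing extra to import):
#   hidden_size  = int(96 * 2 ** (4 - 1)) == 768
#   stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']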
| 35 | import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["""model.decoder.embed_positions.weights"""]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int):
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str):
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""")
    if len(unexpected_keys) > 0:
        raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"""Pushing model {checkpoint} to {repo_id}""")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
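# Example invocation of this conversion script (the dump folder below is a
# placeholder path, not from the source):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu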
| 35 | 1 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **saver_kwargs):
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **saver_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
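# Example shell usage wired up by fire.Fire above (file names are placeholders):
#   python rouge_cli.py preds.txt refs.txt --save_path rouge.json
# Line i of preds.txt is scored against line i of refs.txt.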
| 71 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 1_0_0_0_0
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
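# Minimal usage sketch of this builder through the public API (the file path is
# a placeholder):
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
#   print(ds["train"].features)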
| 40 | 0 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.9_999 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = 2 / 3 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Module ):
A_ = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE , )
A_ = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A_ = True
if kwargs.get('''max_value''' , _SCREAMING_SNAKE_CASE ) is not None:
A_ = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
A_ = kwargs['''max_value''']
if kwargs.get('''min_value''' , _SCREAMING_SNAKE_CASE ) is not None:
A_ = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
A_ = kwargs['''min_value''']
A_ = list(_SCREAMING_SNAKE_CASE )
A_ = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , _SCREAMING_SNAKE_CASE ) is not None:
A_ = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
self.to(device=kwargs['''device'''] )
A_ = None
A_ = decay
A_ = min_decay
A_ = update_after_step
A_ = use_ema_warmup
A_ = inv_gamma
A_ = power
A_ = 0
A_ = None # set in `step()`
A_ = model_cls
A_ = model_config
@classmethod
def __A ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> "EMAModel":
A_ ,A_ = model_cls.load_config(_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE )
A_ = model_cls.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = cls(model.parameters() , model_cls=_SCREAMING_SNAKE_CASE , model_config=model.config )
ema_model.load_state_dict(_SCREAMING_SNAKE_CASE )
return ema_model
def __A ( self , _SCREAMING_SNAKE_CASE ) -> int:
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
A_ = self.model_cls.from_config(self.model_config )
A_ = self.state_dict()
state_dict.pop('''shadow_params''' , _SCREAMING_SNAKE_CASE )
model.register_to_config(**_SCREAMING_SNAKE_CASE )
self.copy_to(model.parameters() )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> float:
A_ = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
A_ = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
A_ = (1 + step) / (10 + step)
A_ = min(_SCREAMING_SNAKE_CASE , self.decay )
# make sure decay is not smaller than min_decay
A_ = max(_SCREAMING_SNAKE_CASE , self.min_decay )
return cur_decay_value
@torch.no_grad()
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Module ):
A_ = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE , )
A_ = parameters.parameters()
A_ = list(_SCREAMING_SNAKE_CASE )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, _SCREAMING_SNAKE_CASE):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
A_ = list(_SCREAMING_SNAKE_CASE )
for s_param, param in zip(self.shadow_params , _SCREAMING_SNAKE_CASE ):
param.data.copy_(s_param.to(param.device ).data )
def __A ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> None:
A_ = [
p.to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ) if p.is_floating_point() else p.to(device=_SCREAMING_SNAKE_CASE )
for p in self.shadow_params
]
def __A ( self ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
A_ = [param.detach().cpu().clone() for param in parameters]
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , _SCREAMING_SNAKE_CASE ):
param.data.copy_(c_param.data )
# Better memory-wise.
A_ = None
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
A_ = copy.deepcopy(_SCREAMING_SNAKE_CASE )
A_ = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
A_ = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid min_decay''' )
A_ = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid optimization_step''' )
A_ = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid update_after_step''' )
A_ = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid use_ema_warmup''' )
A_ = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
A_ = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
A_ = state_dict.get('''shadow_params''' , _SCREAMING_SNAKE_CASE )
if shadow_params is not None:
A_ = shadow_params
if not isinstance(self.shadow_params , _SCREAMING_SNAKE_CASE ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
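# Worked example of the decay schedule implemented in `get_decay` above
# (a standalone reimplementation for illustration, using the constructor
# defaults: decay=0.9999, min_decay=0.0, update_after_step=0, no warmup):
def ema_decay_sketch(optimization_step, decay=0.9999, min_decay=0.0,
                     update_after_step=0, use_ema_warmup=False,
                     inv_gamma=1.0, power=2 / 3):
    step = max(0, optimization_step - update_after_step - 1)
    if step <= 0:
        return 0.0
    if use_ema_warmup:
        cur_decay_value = 1 - (1 + step / inv_gamma) ** -power
    else:
        cur_decay_value = (1 + step) / (10 + step)
    return max(min(cur_decay_value, decay), min_decay)

assert ema_decay_sketch(1) == 0.0                  # still warming up
assert abs(ema_decay_sketch(2) - 2 / 11) < 1e-12   # (1 + 1) / (10 + 1)
assert ema_decay_sketch(10_000_000) == 0.9999      # clamped to `decay`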
| 365 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = 'mgp-str'

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=5_0257, num_wordpiece_labels=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1E-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
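# Brief usage sketch (the defaults mirror the alibaba-damo/mgp-str-base entry
# in the archive map above):
#   config = MgpstrConfig()
#   config.hidden_size // config.num_attention_heads   # 64-dim attention heads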
| 18 | 0 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(F"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
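# Example invocation (all paths below are placeholders):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird/model.ckpt \
#       --big_bird_config_file ./bigbird/config.json \
#       --pytorch_dump_path ./bigbird-pytorch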
| 184 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
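# Worked example of the token_type_ids layout produced above, independent of
# any vocabulary (the token ids here are made up for illustration):
cls_id, sep_id = 101, 102
seq_a, seq_b = [7, 8], [9]
pair = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]    # CLS A SEP B SEP
type_ids = len([cls_id] + seq_a + [sep_id]) * [0] + len(seq_b + [sep_id]) * [1]
assert type_ids == [0, 0, 0, 0, 1, 1] and len(type_ids) == len(pair)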
| 49 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight')
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1E-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
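# Sanity note on the fused-qkv split used in read_in_q_k_v above: a
# (3 * 256, 256) in_proj weight separates into three (256, 256) blocks, e.g.
#   w = torch.zeros(3 * 256, 256)
#   w[:256, :].shape == w[256:512, :].shape == w[-256:, :].shape == (256, 256)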
| 298 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = '''efficientformer'''

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [4_8, 9_6, 2_2_4, 4_4_8], downsamples: List[bool] = [True, True, True, True], dim: int = 4_4_8, key_dim: int = 3_2, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 1_6, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 2_2_4, batch_norm_eps: float = 1e-05, **kwargs):
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
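# Quick consistency check of the stage-shaped defaults above (plain Python):
_depths, _hidden_sizes, _downsamples = [3, 2, 6, 4], [48, 96, 224, 448], [True, True, True, True]
assert len(_depths) == len(_hidden_sizes) == len(_downsamples) == 4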
| 298 | 1 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that appends each vertex after all of its descendants."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph, collecting one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS ordering on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
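if __name__ == "__main__":
    # Example run against the two test graphs defined at the top of this file:
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]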
| 5 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(123)
    False
    >>> is_palindrome(-1)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_lowerCAmelCase :Dict = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
'''simple docstring'''
    mode = '''summarization'''
    loss_names = ['''loss''']
    metric_names = ROUGE_KEYS
    default_val_metric = '''rouge2'''
def __init__( self , A , **A ) -> List[str]:
if hparams.sortish_sampler and hparams.gpus > 1:
_UpperCAmelCase : Tuple = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(UpperCamelCase__ , num_labels=UpperCamelCase__ , mode=self.mode , **UpperCamelCase__ )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
_UpperCAmelCase : str = Path(self.output_dir ) / '''metrics.json'''
_UpperCAmelCase : int = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : Dict = defaultdict(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = self.config.model_type
_UpperCAmelCase : Optional[Any] = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
_UpperCAmelCase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_UpperCAmelCase : Dict = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
_UpperCAmelCase : Optional[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_UpperCAmelCase : str = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
_UpperCAmelCase : int = get_git_info()['''repo_sha''']
_UpperCAmelCase : Optional[Any] = hparams.num_workers
_UpperCAmelCase : Dict = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase__ ):
_UpperCAmelCase : List[Any] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_UpperCAmelCase : List[str] = self.decoder_start_token_id
_UpperCAmelCase : str = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Any = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_UpperCAmelCase : Any = self.hparams.eval_max_gen_length
else:
_UpperCAmelCase : List[str] = self.model.config.max_length
_UpperCAmelCase : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __lowerCAmelCase ( self , A ) -> Dict[str, List[str]]:
_UpperCAmelCase : List[str] = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(UpperCamelCase__ , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
_UpperCAmelCase : Dict = True
return readable_batch
def __lowerCAmelCase ( self , A , **A ) -> Tuple:
return self.model(UpperCamelCase__ , **UpperCamelCase__ )
def __lowerCAmelCase ( self , A ) -> Any:
_UpperCAmelCase : Tuple = self.tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return lmap(str.strip , UpperCamelCase__ )
def __lowerCAmelCase ( self , A ) -> Tuple:
_UpperCAmelCase : Any = self.tokenizer.pad_token_id
_UpperCAmelCase : Optional[int] = batch['''input_ids'''], batch['''attention_mask''']
_UpperCAmelCase : Optional[int] = batch['''labels''']
if isinstance(self.model , UpperCamelCase__ ):
_UpperCAmelCase : Optional[Any] = self.model._shift_right(UpperCamelCase__ )
else:
_UpperCAmelCase : Dict = shift_tokens_right(UpperCamelCase__ , UpperCamelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_UpperCAmelCase : Any = decoder_input_ids
self.save_readable_batch(UpperCamelCase__ )
_UpperCAmelCase : str = self(UpperCamelCase__ , attention_mask=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , use_cache=UpperCamelCase__ )
_UpperCAmelCase : str = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_UpperCAmelCase : List[Any] = nn.CrossEntropyLoss(ignore_index=UpperCamelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
_UpperCAmelCase : Tuple = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
_UpperCAmelCase : Union[str, Any] = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
_UpperCAmelCase : Tuple = label_smoothed_nll_loss(
UpperCamelCase__ , UpperCamelCase__ , self.hparams.label_smoothing , ignore_index=UpperCamelCase__ )
return (loss,)
@property
def __lowerCAmelCase ( self ) -> int:
return self.tokenizer.pad_token_id
def __lowerCAmelCase ( self , A , A ) -> Dict:
_UpperCAmelCase : int = self._step(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = dict(zip(self.loss_names , UpperCamelCase__ ) )
# tokens per batch
_UpperCAmelCase : int = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
_UpperCAmelCase : Dict = batch['''input_ids'''].shape[0]
_UpperCAmelCase : Tuple = batch['''input_ids'''].eq(self.pad ).sum()
_UpperCAmelCase : Optional[int] = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __lowerCAmelCase ( self , A , A ) -> Dict:
return self._generative_step(UpperCamelCase__ )
def __lowerCAmelCase ( self , A , A="val" ) -> Dict:
self.step_count += 1
_UpperCAmelCase : Dict = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_UpperCAmelCase : Optional[int] = losses['''loss''']
_UpperCAmelCase : str = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
_UpperCAmelCase : List[str] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_UpperCAmelCase : torch.FloatTensor = torch.tensor(UpperCamelCase__ ).type_as(UpperCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
_UpperCAmelCase : Dict = self.step_count
self.metrics[prefix].append(UpperCamelCase__ ) # callback writes this to self.metrics_save_path
_UpperCAmelCase : List[str] = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
def __lowerCAmelCase ( self , A , A ) -> Dict:
return calculate_rouge(UpperCamelCase__ , UpperCamelCase__ )
def __lowerCAmelCase ( self , A ) -> dict:
_UpperCAmelCase : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_UpperCAmelCase : Optional[int] = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_UpperCAmelCase : Union[str, Any] = (time.time() - ta) / batch['''input_ids'''].shape[0]
_UpperCAmelCase : List[str] = self.ids_to_clean_text(UpperCamelCase__ )
_UpperCAmelCase : List[str] = self.ids_to_clean_text(batch['''labels'''] )
_UpperCAmelCase : str = self._step(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = dict(zip(self.loss_names , UpperCamelCase__ ) )
_UpperCAmelCase : Dict = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Dict = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__ ) )
base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__ )
return base_metrics
def __lowerCAmelCase ( self , A , A ) -> Union[str, Any]:
return self._generative_step(UpperCamelCase__ )
def __lowerCAmelCase ( self , A ) -> Optional[Any]:
return self.validation_epoch_end(UpperCamelCase__ , prefix='''test''' )
def __lowerCAmelCase ( self , A ) -> SeqaSeqDataset:
_UpperCAmelCase : str = self.n_obs[type_path]
_UpperCAmelCase : Tuple = self.target_lens[type_path]
_UpperCAmelCase : Any = self.dataset_class(
self.tokenizer , type_path=UpperCamelCase__ , n_obs=UpperCamelCase__ , max_target_length=UpperCamelCase__ , **self.dataset_kwargs , )
return dataset
def __lowerCAmelCase ( self , A , A , A = False ) -> DataLoader:
_UpperCAmelCase : Tuple = self.get_dataset(UpperCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_UpperCAmelCase : Dict = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_UpperCAmelCase : Union[str, Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
def __lowerCAmelCase ( self ) -> DataLoader:
_UpperCAmelCase : List[Any] = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
return dataloader
def __lowerCAmelCase ( self ) -> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def __lowerCAmelCase ( self ) -> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length", default=56, type=int,
            help=(
                "The maximum total output sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int,
            help=(
                "The maximum total output sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int,
            help=(
                "The maximum total output sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="Task name: summarization or translation."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
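# Illustrative invocation sketch (not part of the original script). The flags shown are the
# ones registered in add_model_specific_args above; --data_dir, --output_dir, --do_predict and
# the model flags are assumed to come from add_generic_args/BaseTransformer, which live
# outside this file.
#
#   python finetune.py \
#       --task translation --src_lang en --tgt_lang ro \
#       --max_source_length 128 --max_target_length 128 \
#       --val_metric bleu --save_top_k 1 --early_stopping_patience 3 \
#       --data_dir ./wmt_en_ro --output_dir ./out --do_predict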
| 366 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
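
# Illustrative usage sketch (not part of the original test file): driving a
# DisjunctiveConstraint token by token through the same update() API exercised above.
# The token values are arbitrary.
def _demo_disjunctive_progress():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in [1, 2, 4]:  # walks one branch of the constraint trie
        stepped, completed, reset = dc.update(token)
    assert dc.completed and dc.current_seq == [1, 2, 4]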
| 68 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__a = "src/transformers"
__a = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the block in `filename` between `start_prompt` and `end_prompt`, skipping surrounding empty lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide` as a formatted markdown string."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Compare the model list in the task guide to the up-to-date one and optionally overwrite it."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 35 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
    assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 35 | 1 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
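
# Illustrative note (not part of the original module): because the shim above only warns and
# then delegates to super().__init__, existing code that instantiates the old class keeps
# working while surfacing a FutureWarning, e.g.:
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       OwlViTFeatureExtractor()  # may require model kwargs; shown here only for the warning path
#       assert issubclass(caught[-1].category, FutureWarning)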
| 354 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 179 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
 | 348 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index i in log(N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the interval [i, j] in log(N) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield the nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 18 | 0 |
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
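
# Quick smoke test (illustrative, not from the original file). Note that rabin_miller()
# assumes an odd candidate > 3, since random.randrange(2, num - 1) needs a non-empty range.
def _rabin_miller_smoke_test():
    assert rabin_miller(101)
    assert not rabin_miller(221)  # 221 = 13 * 17; a random witness exposes it with overwhelming probability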
def lowercase__ ( __UpperCamelCase )-> bool:
if num < 2:
return False
UpperCamelCase = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
 | 352 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by running a forward pass with `x` as input
        and aligning the traced operations of both modules."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 183 | 0 |
'''simple docstring'''
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    r"""This class overrides [`TFT5Model`]. Please check the superclass for the appropriate documentation."""

    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    r"""This class overrides [`TFT5ForConditionalGeneration`]. Please check the superclass for documentation."""

    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    r"""This class overrides [`TFT5EncoderModel`]. Please check the superclass for the appropriate documentation."""

    model_type = "mt5"
    config_class = MT5Config
| 298 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )
        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 298 | 1 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Check whether `number` is prime by trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
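
# Illustrative usage (not from the original file): next_prime skips past an input that is
# already prime, so the result is always a larger prime unless "desc" walks downward.
def _demo_next_prime():
    assert next_prime(14) == 17  # 15 and 16 are composite
    assert next_prime(7) == 11   # 7 is prime, so the search restarts at 8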
| 359 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 272 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
 | 81 |
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that flags a callable as experimental: every call emits a UserWarning."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
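
# Illustrative usage sketch (not part of the original module): each call to the wrapped
# function first emits a UserWarning flagging it as experimental. The function below is
# hypothetical.
@experimental
def _new_feature(value: int) -> int:
    return value * 2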
| 68 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 359 |
'''simple docstring'''
def actual_power(a: int, b: int):
    """Divide-and-conquer computation of a^b for integer a, b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
| 337 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
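
# Illustrative sketch (not part of the original module): instantiating a small custom
# configuration. The sizes below are arbitrary.
def _demo_bert_config():
    config = BertConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    assert config.model_type == "bert"
    assert config.hidden_size == 64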
| 179 | 0 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform dependent. On success, sets self._lock_file_fd to the lock file's descriptor."""
        raise NotImplementedError()

    def _release(self):
        """Platform dependent. Releases the lock and resets self._lock_file_fd."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock( lowerCamelCase_ ):
    '''simple docstring'''

    def _acquire( self ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
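# --- Added usage sketch (not part of the original module). Assuming the base lock class
# defined above exposes the standard py-filelock API, the platform-selected FileLock
# alias is used as a context manager:
#
#     lock = FileLock("demo.txt.lock", timeout=5)
#     with lock:  # __enter__ -> acquire(), __exit__ -> release()
#         with open("demo.txt", "a") as f:
#             f.write("guarded write\n")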
| 73 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }

    def __init__( self , vocab_size=50265 , d_model=1024 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=4096 , activation_function="gelu" , max_position_embeddings=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 73 | 1 |
'''simple docstring'''
import argparse
import hashlib  # hashlib is only used for the self-test below
import struct


class SHAaHash :
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate( n , b ):
        """simple docstring"""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding( self ):
        """simple docstring"""
        padding = b'\x80' + b'\x00' * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q' , 8 * len(self.data ) )
        return padded_data

    def split_blocks( self ):
        """simple docstring"""
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]

    def expand_block( self , block ):
        """simple docstring"""
        w = list(struct.unpack('>16L' , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w

    def final_hash( self ):
        """simple docstring"""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a , b , c , d , e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h )


def test_sha1_hash( ) -> None:
    msg = b'Test String'
    assert SHAaHash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest()  # noqa: S324


def main( ) -> None:
    parser = argparse.ArgumentParser(description='Process some strings or files' )
    parser.add_argument(
        '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHAaHash(hash_input ).final_hash() )


if __name__ == "__main__":
    main()
import doctest

doctest.testmod()
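# --- Added sanity check (not part of the original script). SHA-1 of b"abc" is a
# well-known test vector:
#
#     >>> SHAaHash(b"abc").final_hash()
#     'a9993e364706816aba3e25717850c26c9cd0d89d'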
| 42 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
    },
    'tokenizer_file': {
        'unc-nlp/lxmert-base-uncased': (
            'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'unc-nlp/lxmert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class LxmertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
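# --- Added usage sketch (not from the original file; assumes Hub access and the
# packaged export name):
#
#     tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#     enc = tok("a picture of a cat", "what animal is shown?")
#     # enc["input_ids"] is [CLS] seq_0 [SEP] seq_1 [SEP], as built above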
| 183 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification( TaskTemplate ):
    task: str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'image': Image()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features( self , features ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
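# --- Added usage sketch (not from the original file), showing how `datasets` aligns the
# template with concrete features:
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification(label_column="labels").align_with_features(features)
#     # task.label_schema["labels"] is now the two-class ClassLabel above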
| 112 |
"""simple docstring"""
import string
def atbash_slow( sequence: str ) -> str:
    """simple docstring"""
    output = ""
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            output += chr(155 - extract )
        elif 97 <= extract <= 122:
            output += chr(219 - extract )
        else:
            output += i
    return output


def atbash( sequence: str ) -> str:
    """simple docstring"""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )


def benchmark( ) -> None:
    """simple docstring"""
    from timeit import timeit

    print("Running performance benchmarks..." )
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(F'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=setup )} seconds' )
    print(F'> atbash(): {timeit("atbash(printable)" , setup=setup )} seconds' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 112 | 1 |
from typing import Any
import numpy as np
def is_hermitian( matrix: np.ndarray ) -> bool:
    """simple docstring"""
    return np.array_equal(matrix , matrix.conjugate().T )


def rayleigh_quotient( a: np.ndarray , v: np.ndarray ) -> Any:
    """simple docstring"""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))


def tests( ) -> None:
    """simple docstring"""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F"{a} is not hermitian."
    print(rayleigh_quotient(a , v ) )

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F"{a} is not hermitian."
    assert rayleigh_quotient(a , v ) == float(3 )
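# --- Added worked example (not in the original). For a Hermitian matrix, the Rayleigh
# quotient of an eigenvector is the matching eigenvalue:
#
#     >>> rayleigh_quotient(np.array([[2, 0], [0, 5]]), np.array([[0], [1]]))
#     array([[5.]])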
| 90 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__( self , vocab_size = 250002 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 514 , initializer_range = 0.02 , pad_token_id = 1 , layer_norm_eps = 1e-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
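# --- Added usage sketch (not from the original file), assuming the packaged export:
#
#     config = ErnieMConfig(hidden_size=128, num_hidden_layers=2)
#     assert config.num_classes == 2  # "num_classes" resolves to num_labels via attribute_map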
| 272 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class ExampleDifferenceTests( unittest.TestCase ):
    def one_complete_example( self , complete_file_name: str , parser_only: bool , secondary_filename: str = None , special_strings: list = None ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples" , "by_feature" ) )
        examples_path = os.path.abspath("examples" )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section="main()" if parser_only else "training_function()" , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name ) , item_path , secondary_filename , parser_only )
                        diff = "\n".join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , "" )
                        self.assertEqual(diff , "" )
    def test_nlp_examples( self ):
        self.one_complete_example("complete_nlp_example.py" , True )
        self.one_complete_example("complete_nlp_example.py" , False )

    def test_cv_examples( self ):
        cv_path = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py" , True , cv_path , special_strings )
        self.one_complete_example("complete_cv_example.py" , False , cv_path , special_strings )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class FeatureExamplesTests( TempDirTestCase ):
    clear_on_setup = False

    @classmethod
    def setUpClass( cls ):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , "default_config.yml" )

        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch( self ):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        '''.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )

    def test_checkpointing_by_steps( self ):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        '''.split()
        _ = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
    def test_load_states_by_epoch( self ):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
        '''.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn("epoch 0:" , output )
        self.assertIn("epoch 1:" , output )

    def test_load_states_by_steps( self ):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
        '''.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:" , output )
            self.assertIn("epoch 1:" , output )
        else:
            self.assertIn("epoch 0:" , output )
            self.assertIn("epoch 1:" , output )
    @slow
    def test_cross_validation( self ):
        testargs = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
        with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall("({.+})" , output )
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results )
            self.assertGreaterEqual(results["accuracy"] , 0.75 )

    def test_multi_process_metrics( self ):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_tracking( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'''
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            '''.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , "tracking" ) ) )

    def test_gradient_accumulation( self ):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs )

    def test_local_sgd( self ):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs )
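# --- Added note (not part of the test module). These tests are normally driven with
# pytest from the accelerate repository root, e.g.
#
#     python -m pytest -sv tests/test_examples.py
#
# TESTING_MOCKED_DATALOADERS=1 keeps the example scripts fast and offline.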
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class RoCBertConfig( PretrainedConfig ):
    model_type = "roc_bert"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24858 , concat_input=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
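# --- Added usage sketch (not from the original file), assuming the packaged export:
#
#     config = RoCBertConfig(hidden_size=128, num_hidden_layers=2)
#     # the shape/pronunciation embeddings stay enabled unless enable_shape or
#     # enable_pronunciation is set to False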
| 342 | 0 |
"""simple docstring"""
def cocktail_shaker_sort( unsorted: list ) -> list:
    '''simple docstring'''
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False

        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
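# --- Added worked example (not in the original): each full pass bubbles the remaining
# maximum right and the remaining minimum left.
#
#     >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
#     [1, 2, 2, 4, 5]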
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 135 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig( PretrainedConfig ):
    model_type = 'yolos'

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation( self ):
        return 1E-4

    @property
    def default_onnx_opset( self ):
        return 12
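# --- Added usage sketch (not from the original file), assuming the packaged exports:
#
#     config = YolosConfig(num_detection_tokens=50)
#     # YolosOnnxConfig then supplies the pixel_values axes, atol and opset for ONNX export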
| 337 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field( default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments :
    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'} , )
    plot_along_batch: bool = field(
        default=False , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
    is_time: bool = field(
        default=False , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
    no_log_scale: bool = field(
        default=False , metadata={'help': 'Disable logarithmic scale when plotting'} , )
    is_train: bool = field(
        default=False , metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def can_convert_to_int( value ) -> bool:
    '''simple docstring'''
    try:
        int(value )
        return True
    except ValueError:
        return False


def can_convert_to_float( value ) -> bool:
    '''simple docstring'''
    try:
        float(value )
        return True
    except ValueError:
        return False
class Plot :
    def __init__( self , args ):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )

        with open(self.args.csv_file , newline='' ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
                if can_convert_to_int(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size'] ), int(row['sequence_length'] ))
                    ] = int(row['result'] )
                elif can_convert_to_float(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size'] ), int(row['sequence_length'] ))
                    ] = float(row['result'] )
    def plot( self ):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log' )
            ax.set_yscale('log' )

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )

        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz'] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len'] ) )
            results = self.result_dict[model_name]['result']

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )

                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )

                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
                plt.plot(x_axis_array , y_axis_array , '--' )

            title_str += f' {label_model_name} vs.'

        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'

        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main( ):
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
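# --- Added note (not part of the original script). Typical invocation, assuming the
# file is saved as plot_csv_file.py and the csv was produced by the transformers
# benchmark utilities:
#
#     python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png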
| 351 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig( PretrainedConfig ):
    model_type = 'albert'

    def __init__( self , vocab_size=30000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
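# --- Added usage sketch (not from the original file), assuming the packaged exports:
#
#     config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#     # AlbertOnnxConfig(config, task="multiple-choice") adds the extra "choice" axis above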
| 304 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
__lowerCamelCase : str = tmp_path / 'cache'
__lowerCamelCase : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCamelCase : Tuple = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
__lowerCamelCase : Union[str, Any] = tmp_path / 'cache'
__lowerCamelCase : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : Union[str, Any] = features.copy() if features else default_expected_features
__lowerCamelCase : Union[str, Any] = (
Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCamelCase : Dict = ParquetDatasetReader(lowerCamelCase__ , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
__lowerCamelCase : Union[str, Any] = tmp_path / 'cache'
__lowerCamelCase : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : Tuple = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ , split=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
if issubclass(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = parquet_path
elif issubclass(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : Dict = [parquet_path]
__lowerCamelCase : List[Any] = tmp_path / 'cache'
__lowerCamelCase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : Dict = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
def _check_parquet_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = tmp_path / 'cache'
__lowerCamelCase : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCamelCase : List[str] = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
__lowerCamelCase : Tuple = tmp_path / 'cache'
__lowerCamelCase : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : List[str] = features.copy() if features else default_expected_features
__lowerCamelCase : str = (
Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCamelCase : Union[str, Any] = ParquetDatasetReader({'train': parquet_path} , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
if split:
__lowerCamelCase : List[str] = {split: parquet_path}
else:
__lowerCamelCase : List[str] = 'train'
__lowerCamelCase : List[Any] = {'train': parquet_path, 'test': parquet_path}
__lowerCamelCase : Tuple = tmp_path / 'cache'
__lowerCamelCase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : Optional[Any] = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
__lowerCamelCase : Union[str, Any] = ParquetDatasetWriter(lowerCamelCase__ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
__lowerCamelCase : Union[str, Any] = pq.ParquetFile(tmp_path / 'foo.parquet' )
__lowerCamelCase : List[str] = pf.read()
assert dataset.data.table == output_table
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : List[str] = str(shared_datadir / 'test_image_rgb.jpg' )
__lowerCamelCase : Optional[int] = {'image': [image_path]}
__lowerCamelCase : int = Features({'image': Image()} )
__lowerCamelCase : Union[str, Any] = Dataset.from_dict(lowerCamelCase__ , features=lowerCamelCase__ )
__lowerCamelCase : Any = ParquetDatasetWriter(lowerCamelCase__ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
__lowerCamelCase : int = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
__lowerCamelCase : Dict = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowerCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
assert get_writer_batch_size(lowerCamelCase__ ) == expected
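# --- Added note (not part of the test module). These parametrized tests run under
# pytest; `tmp_path` is a built-in fixture, while `parquet_path` and `shared_datadir`
# are supplied by the datasets test suite:
#
#     python -m pytest -sv tests/io/test_parquet.py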
| 73 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION ="""\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_DESCRIPTION ="""\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_KWARGS_DESCRIPTION ="""
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy( preds , labels ) -> float:
    return float((preds == labels).mean() )


def acc_and_fa( preds , labels ) -> dict:
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }


def precision_at_aa( en_sentvecs , in_sentvecs ) -> float:
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )

    sim = cdist(en_sentvecs , in_sentvecs , 'cosine' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' if self.config_name != 'cvit-mkb-clsr' else None ,)
    def _compute( self , predictions , references ):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions , references )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]')
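# --- Added usage sketch (mirrors the docstring examples above; not in the original):
#
#     metric = datasets.load_metric("indic_glue", "wnli")
#     print(metric.compute(predictions=[0, 1], references=[0, 1]))  # {'accuracy': 1.0}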
| 73 | 1 |
"""simple docstring"""
from __future__ import annotations
def longest_subsequence( array: list ) -> list:  # This function is recursive
    """simple docstring"""
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
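# --- Added worked example (not in the original). The pivot recursion keeps only
# non-decreasing picks:
#
#     >>> longest_subsequence([5, 1, 2, 3])
#     [1, 2, 3]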
| 359 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment :
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
        'task_name': 'mnli',
        'per_device_train_batch_size': 16,
        'per_device_eval_batch_size': 16,
        'do_train': True,
        'do_eval': True,
        'do_predict': True,
        'output_dir': '/opt/ml/model',
        'overwrite_output_dir': True,
        'max_steps': 500,
        'save_steps': 5500,
    }
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1000}

    @property
    def metric_definitions( self ):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name( self ):
        return f'''{self.framework}-transfromers-test'''

    @property
    def test_path( self ):
        return f'''./tests/sagemaker/scripts/{self.framework}'''

    @property
    def image_uri( self ):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def sm_env( request ):
    """simple docstring"""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
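# --- Added usage sketch (not part of the original conftest). Test classes consume the
# fixture through their class attributes, e.g.
#
#     @pytest.mark.usefixtures("sm_env")
#     class TestSingleNode:
#         framework = "pytorch"
#         def test_env(self):
#             assert self.env.base_job_name == "pytorch-transfromers-test"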
| 30 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase_ ( _lowerCamelCase: Any , _lowerCamelCase: Dict , _lowerCamelCase: Optional[Any] , _lowerCamelCase: Optional[int] , _lowerCamelCase: Any ):
# Load configuration defined in the metadata file
with open(_lowerCamelCase ) as metadata_file:
__SCREAMING_SNAKE_CASE : List[str] = json.load(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = LukeConfig(use_entity_aware_attention=_lowerCamelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
__SCREAMING_SNAKE_CASE : Dict = torch.load(_lowerCamelCase , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
__SCREAMING_SNAKE_CASE : str = load_original_entity_vocab(_lowerCamelCase )
# add an entry for [MASK2]
__SCREAMING_SNAKE_CASE : Optional[int] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__SCREAMING_SNAKE_CASE : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
__SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken("""<ent>""" , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = AddedToken("""<ent2>""" , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , """tokenizer_config.json""" ) , """r""" ) as f:
__SCREAMING_SNAKE_CASE : Tuple = json.load(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = """MLukeTokenizer"""
with open(os.path.join(_lowerCamelCase , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
with open(os.path.join(_lowerCamelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = MLukeTokenizer.from_pretrained(_lowerCamelCase )
# Initialize the embeddings of the special tokens
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
__SCREAMING_SNAKE_CASE : List[str] = state_dict["""embeddings.word_embeddings.weight"""]
__SCREAMING_SNAKE_CASE : Dict = word_emb[ent_init_index].unsqueeze(0 )
__SCREAMING_SNAKE_CASE : Dict = word_emb[enta_init_index].unsqueeze(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__SCREAMING_SNAKE_CASE : List[str] = state_dict[bias_name]
__SCREAMING_SNAKE_CASE : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_bias[enta_init_index].unsqueeze(0 )
__SCREAMING_SNAKE_CASE : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__SCREAMING_SNAKE_CASE : int = F"encoder.layer.{layer_index}.attention.self."
__SCREAMING_SNAKE_CASE : int = state_dict[prefix + matrix_name]
__SCREAMING_SNAKE_CASE : List[str] = state_dict[prefix + matrix_name]
__SCREAMING_SNAKE_CASE : List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__SCREAMING_SNAKE_CASE : Optional[Any] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
__SCREAMING_SNAKE_CASE : Tuple = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__SCREAMING_SNAKE_CASE : int = state_dict["""entity_predictions.bias"""]
__SCREAMING_SNAKE_CASE : str = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__SCREAMING_SNAKE_CASE : List[str] = LukeForMaskedLM(config=_lowerCamelCase ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
__SCREAMING_SNAKE_CASE : int = state_dict[key]
else:
__SCREAMING_SNAKE_CASE : Tuple = state_dict[key]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
if set(_lowerCamelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(_lowerCamelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__SCREAMING_SNAKE_CASE : Dict = MLukeTokenizer.from_pretrained(_lowerCamelCase , task="""entity_classification""" )
__SCREAMING_SNAKE_CASE : List[Any] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
__SCREAMING_SNAKE_CASE : Tuple = (0, 9)
__SCREAMING_SNAKE_CASE : Any = tokenizer(_lowerCamelCase , entity_spans=[span] , return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**_lowerCamelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
    expected_shape = torch.Size((1, 33, 768) )
    expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
    raise ValueError(
        F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
    raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
    expected_shape = torch.Size((1, 1, 768) )
    expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
    raise ValueError(
        F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
        F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
    raise ValueError
# Verify masked word/entity prediction
tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
text = """Tokyo is the capital of <mask>."""
span = (24, 30)
encoding = tokenizer(text , entity_spans=[span] , return_tensors="""pt""" )
outputs = model(**encoding )
input_ids = encoding["""input_ids"""][0].tolist()
mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(predicted_id )
predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
multilingual_predicted_entities = [
    entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(_lowerCamelCase ) )
model.save_pretrained(_lowerCamelCase )
def load_original_entity_vocab( entity_vocab_path ):
    SPECIAL_TOKENS = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["""id"""]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 112 |
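The strict=False key bookkeeping the conversion script above validates can be seen on a toy module. A minimal sketch, assuming only that PyTorch is installed; the module and keys here are illustrative, not part of the script:

import torch
from torch import nn

# load_state_dict(strict=False) reports parameters the model expected
# but did not receive (missing) and extras it was handed (unexpected).
model = nn.Linear(4, 4)
state = {"weight": torch.zeros(4, 4), "extra": torch.zeros(1)}
missing, unexpected = model.load_state_dict(state, strict=False)
assert missing == ["bias"] and unexpected == ["extra"]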
'''simple docstring'''
import string
def atbash_slow( sequence: str ):
    output = ""
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            output += chr(155 - extract )
        elif 97 <= extract <= 122:
            output += chr(219 - extract )
        else:
            output += i
    return output
def atbash( sequence: str ):
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
def benchmark( ):
    from timeit import timeit
    print("""Running performance benchmarks...""" )
    setup = """from string import printable ; from __main__ import atbash, atbash_slow"""
    print(F"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=setup )} seconds" )
    print(F"> atbash(): {timeit('atbash(printable)' , setup=setup )} seconds" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"{example} encrypted in atbash: {atbash(example)}")
benchmark() | 112 | 1 |
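Atbash is an involution, which gives a cheap property test. A minimal sketch using the two functions repaired above:

# Applying Atbash twice must return the input, and both
# implementations must agree on every sample.
for sample in ("ABCDEFGH", "123GGjj", "with space"):
    assert atbash(atbash(sample)) == sample
    assert atbash(sample) == atbash_slow(sample)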
from __future__ import annotations
class A__ :
"""simple docstring"""
def __init__( self , rows ):
    error = TypeError(
        '''Matrices must be formed from a list of zero or more lists containing at '''
        '''least one and the same number of values, each of which must be of type '''
        '''int or float.''' )
    if len(rows ) != 0:
        cols = len(rows[0] )
        if cols == 0:
            raise error
        for row in rows:
            if len(row ) != cols:
                raise error
            for value in row:
                if not isinstance(value , (int, float) ):
                    raise error
        self.rows = rows
    else:
        self.rows = []
def a_ ( self ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def a_ ( self ):
return len(self.rows )
@property
def a_ ( self ):
return len(self.rows[0] )
@property
def a_ ( self ):
return (self.num_rows, self.num_columns)
@property
def a_ ( self ):
return self.order[0] == self.order[1]
def a_ ( self ):
snake_case = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(_a )
def a_ ( self ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def a_ ( self ):
return bool(self.determinant() )
def a_ ( self , __snake_case , __snake_case ):
snake_case = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(_a ).determinant()
def a_ ( self , __snake_case , __snake_case ):
if (row + column) % 2 == 0:
return self.get_minor(_a , _a )
return -1 * self.get_minor(_a , _a )
def a_ ( self ):
return Matrix(
[
[self.get_minor(_a , _a ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def a_ ( self ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def a_ ( self ):
snake_case = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(_a )
def a_ ( self ):
snake_case = self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
return self.adjugate() * (1 / determinant)
def __repr__( self ):
return str(self.rows )
def __str__( self ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'''[''' + '''. '''.join([str(_a ) for value in row] ) + '''.]'''
for row in self.rows
] )
+ "]"
)
def a_ ( self , __snake_case , __snake_case = None ):
snake_case = TypeError('''Row must be a list containing all ints and/or floats''' )
if not isinstance(_a , _a ):
raise type_error
for value in row:
if not isinstance(_a , (int, float) ):
raise type_error
if len(_a ) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''' )
if position is None:
self.rows.append(_a )
else:
snake_case = self.rows[0:position] + [row] + self.rows[position:]
def a_ ( self , __snake_case , __snake_case = None ):
snake_case = TypeError(
'''Column must be a list containing all ints and/or floats''' )
if not isinstance(_a , _a ):
raise type_error
for value in column:
if not isinstance(_a , (int, float) ):
raise type_error
if len(_a ) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''' )
if position is None:
snake_case = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
snake_case = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , __snake_case ):
if not isinstance(_a , _a ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , __snake_case ):
return not self == other
def __neg__( self ):
return self * -1
def __add__( self , __snake_case ):
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , __snake_case ):
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , __snake_case ):
if isinstance(_a , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(_a , _a ):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''' )
return Matrix(
[
[Matrix.dot_product(_a , _a ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''' )
def __pow__( self , __snake_case ):
if not isinstance(_a , _a ):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
snake_case = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def a_ ( cls , __snake_case , __snake_case ):
return sum(row[i] * column[i] for i in range(len(_a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
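A short usage sketch for the Matrix class above. It assumes the remaining placeholder parameter names in the class body are restored the same way as the repaired __init__; the 2x2 values are arbitrary:

# ad - bc for a 2x2 matrix, plus elementwise add and scalar multiply.
m = Matrix([[4, 7], [2, 6]])
assert m.determinant() == 4 * 6 - 7 * 2   # 10
assert (m + m).rows == [[8, 14], [4, 12]]
assert (m * 2).rows == [[8, 14], [4, 12]]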
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
pipeline_class = UnCLIPImageVariationPipeline
params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
batch_params = IMAGE_VARIATION_BATCH_PARAMS
required_optional_params = [
    'generator',
    'return_dict',
    'decoder_num_inference_steps',
    'super_res_num_inference_steps',
]
test_xformers_attention = False
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return self.time_input_dim
@property
def a_ ( self ):
return self.time_input_dim * 4
@property
def a_ ( self ):
return 1_0_0
@property
def a_ ( self ):
snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
snake_case = UnCLIPTextProjModel(**__snake_case )
return model
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def a_ ( self ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def a_ ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def get_dummy_components( self ):
    decoder = self.dummy_decoder
    text_proj = self.dummy_text_proj
    text_encoder = self.dummy_text_encoder
    tokenizer = self.dummy_tokenizer
    super_res_first = self.dummy_super_res_first
    super_res_last = self.dummy_super_res_last
    decoder_scheduler = UnCLIPScheduler(
        variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
    super_res_scheduler = UnCLIPScheduler(
        variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
    feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
    image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def a_ ( self , __snake_case , __snake_case=0 , __snake_case=True ):
snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith('''mps''' ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
if pil_image:
snake_case = input_image * 0.5 + 0.5
snake_case = input_image.clamp(0 , 1 )
snake_case = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
snake_case = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = torch.device('''cpu''' )
class A__ :
"""simple docstring"""
__magic_name__ = 1
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device=__snake_case ).manual_seed(0 )
snake_case = pipe.decoder.dtype
snake_case = 1
snake_case = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
snake_case = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
img_out_1 = pipe(
    **pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents ).images
pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
# Don't pass image, instead pass embedding
image = pipeline_inputs.pop('''image''' )
image_embeddings = pipe.image_encoder(image ).image_embeds
img_out_2 = pipe(
    **pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents , image_embeddings=image_embeddings , ).images
# make sure passing image embeddings manually is identical
assert np.abs(img_out_1 - img_out_2 ).max() < 1E-4
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__snake_case , expected_max_diff=__snake_case )
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
snake_case = True
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__snake_case , relax_max_difference=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
def a_ ( self ):
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__snake_case )
@skip_mps
def a_ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a_ ( self ):
return super().test_save_load_local()
@skip_mps
def a_ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self ):
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
snake_case = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case = pipeline(
__snake_case , generator=__snake_case , output_type='''np''' , )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__snake_case , __snake_case , 1_5 )
| 213 | 0 |
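The corner-slice comparisons used throughout the tests above all follow one pattern. A self-contained sketch of that check, NumPy only, with made-up values:

import numpy as np

# Take the bottom-right 3x3 patch of the last channel and compare it
# to a reference slice within an absolute tolerance.
image = np.random.RandomState(0).rand(1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = image_slice.copy()
assert image_slice.shape == (3, 3)
assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 1e-2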
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_UpperCAmelCase : Union[str, Any] = logging.getLogger()
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = argparse.ArgumentParser()
parser.add_argument('-f' )
snake_case_ = parser.parse_args()
return args.f
class lowercase ( _lowerCAmelCase ):
def setUp( self ):
    stream_handler = logging.StreamHandler(sys.stdout )
    logger.addHandler(stream_handler )
def run_and_check( self , args ):
    n_gpu = get_gpu_count()
    if n_gpu > 1:
        pass
        # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
        # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
        # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
        # cmd = [sys.executable] + distributed_args + args
        # execute_subprocess_async(cmd, env=self.get_env())
        # XXX: test the results - need to save them first into .json file
    else:
        args.insert(0 , 'run_glue_deebert.py' )
        with patch.object(sys , 'argv' , args ):
            result = run_glue_deebert.main()
            for value in result.values():
                self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
def test_glue_deebert_train( self ):
    train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
    self.run_and_check(train_args )
    eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
    self.run_and_check(eval_args )
    entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
    self.run_and_check(entropy_eval_args )
| 285 |
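The harness above drives run_glue_deebert.main() by swapping out sys.argv for the duration of the call; the mechanism in isolation, standard library only, with illustrative arguments:

import sys
from unittest.mock import patch

# patch.object replaces argv only inside the with-block and restores
# the real value on exit, even if the wrapped main() raises.
with patch.object(sys, "argv", ["run_glue_deebert.py", "--do_eval"]):
    assert sys.argv[0] == "run_glue_deebert.py"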
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class snake_case__ ( unittest.TestCase ):
def setUp( self ):
    self.checkpoint = """ylacombe/bark-small"""
    self.tmpdirname = tempfile.mkdtemp()
    self.voice_preset = """en_speaker_1"""
    self.input_string = """This is a test string"""
    self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
    self.speaker_embeddings_directory = """speaker_embeddings"""
def __magic_name__ ( self , **lowerCAmelCase__ ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Optional[Any] = self.get_tokenizer()
__magic_name__ : int = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__magic_name__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__magic_name__ : str = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__ ( self ) -> Any:
processor = BarkProcessor.from_pretrained(
    pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
seq_len = 35
nb_codebooks_coarse = 2
nb_codebooks_total = 8
voice_preset = {
    """semantic_prompt""": np.ones(seq_len ),
    """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
    """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
inputs = processor(text=self.input_string , voice_preset=voice_preset )
processed_voice_preset = inputs["""history_prompt"""]
for key in voice_preset:
    self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
# test loading voice preset from npz file
tmpfilename = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(tmpfilename , **voice_preset )
inputs = processor(text=self.input_string , voice_preset=tmpfilename )
processed_voice_preset = inputs["""history_prompt"""]
for key in voice_preset:
    self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
# test loading voice preset from the hub
inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : str = self.get_tokenizer()
__magic_name__ : Dict = BarkProcessor(tokenizer=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string )
__magic_name__ : List[Any] = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 342 | 0 |
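The voice-preset round trip in the test above reduces to NumPy's savez/load pair. A minimal sketch, standard library plus NumPy, array shapes chosen to mirror the test:

import os
import tempfile
import numpy as np

# Arrays written with savez come back from np.load keyed by the
# keyword names they were saved under.
preset = {"semantic_prompt": np.ones(35), "coarse_prompt": np.ones((2, 35))}
path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **preset)
loaded = np.load(path)
assert loaded["semantic_prompt"].shape == (35,)
assert loaded["coarse_prompt"].shape == (2, 35)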
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "glpn"
def __init__( self : Dict , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Dict=[2, 2, 2, 2] , _lowerCAmelCase : int=[8, 4, 2, 1] , _lowerCAmelCase : List[Any]=[32, 64, 160, 256] , _lowerCAmelCase : Union[str, Any]=[7, 3, 3, 3] , _lowerCAmelCase : List[str]=[4, 2, 2, 2] , _lowerCAmelCase : Tuple=[1, 2, 5, 8] , _lowerCAmelCase : List[str]=[4, 4, 4, 4] , _lowerCAmelCase : int="gelu" , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : Dict=1E-6 , _lowerCAmelCase : List[Any]=64 , _lowerCAmelCase : List[Any]=10 , _lowerCAmelCase : Optional[int]=-1 , **_lowerCAmelCase : Tuple , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index | 353 |
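Configs like the one above are plain value containers: overriding one field leaves every other default intact. A small sketch, assuming transformers is installed:

from transformers import GLPNConfig

# Only the overridden field changes; the rest keep their defaults.
config = GLPNConfig(decoder_hidden_size=128)
assert config.decoder_hidden_size == 128
assert config.num_encoder_blocks == 4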
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
    size = size if size is not None else {'shortest_edge': 18}
    crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_normalize = do_normalize
    self.image_mean = image_mean
    self.image_std = image_std
def lowerCAmelCase_ ( self : List[Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = LevitImageProcessor if is_vision_available() else None
def setUp( self ):
    self.image_processor_tester = LevitImageProcessingTester(self )
@property
def image_processor_dict( self ):
    return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowerCAmelCase_ ( self : Dict ):
pass
def lowerCAmelCase_ ( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase_ ( self : str ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase_ ( self : Tuple ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 210 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu( vector ):
    '''Elementwise rectified linear unit: max(0, x) for every entry.'''
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 318 |
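Because np.maximum broadcasts, the relu above works on arrays of any shape, not just flat lists; a quick sketch:

import numpy as np

# Negatives clamp to zero, non-negatives pass through, elementwise.
x = np.array([[-1.5, 0.0], [2.0, -3.0]])
assert (np.maximum(0, x) == np.array([[0.0, 0.0], [2.0, 0.0]])).all()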
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase : Optional[int] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_UpperCamelCase : str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_UpperCamelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCamelCase : List[str] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split( identifier ):
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , identifier )
    return [m.group(0 ) for m in matches]
def __UpperCAmelCase ( ) -> str:
UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase_ : Optional[Any] = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
UpperCAmelCase_ : Dict = collections.defaultdict(A )
UpperCAmelCase_ : str = collections.defaultdict(A )
UpperCAmelCase_ : int = collections.defaultdict(A )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(A ):
UpperCAmelCase_ : int = None
if _re_tf_models.match(A ) is not None:
UpperCAmelCase_ : Optional[Any] = tf_models
UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0]
elif _re_flax_models.match(A ) is not None:
UpperCAmelCase_ : int = flax_models
UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0]
elif _re_pt_models.match(A ) is not None:
UpperCAmelCase_ : Union[str, Any] = pt_models
UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0]
if lookup_dict is not None:
while len(A ) > 0:
if attr_name in model_prefix_to_model_type:
UpperCAmelCase_ : Optional[int] = True
break
# Try again after removing the last word in the name
UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] )
UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
UpperCAmelCase_ : List[Any] = list(A )
all_models.sort()
UpperCAmelCase_ : Dict = {'''model_type''': all_models}
UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models]
UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models]
UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
UpperCAmelCase_ : int = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
UpperCAmelCase_ : Any = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
UpperCAmelCase_ : int = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
UpperCAmelCase_ : Dict = '''AutoTokenizer'''
UpperCAmelCase_ : str = [processors[t] for t in all_models]
return pd.DataFrame(A )
def __UpperCAmelCase ( A : Optional[int] ) -> str:
UpperCAmelCase_ : int = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(A , A , A ):
# The type of pipeline may not exist in this framework
if not hasattr(A , A ):
continue
# First extract all model_names
UpperCAmelCase_ : List[str] = []
for name in getattr(A , A ).values():
if isinstance(A , A ):
model_names.append(A )
else:
model_names.extend(list(A ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __UpperCAmelCase ( A : int , A : Any ) -> Tuple:
UpperCAmelCase_ : Tuple = get_frameworks_table()
UpperCAmelCase_ : Any = Dataset.from_pandas(A )
UpperCAmelCase_ : str = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A )
UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A )
UpperCAmelCase_ : Optional[int] = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(A ) )
}
UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() )
UpperCAmelCase_ : Optional[Any] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
UpperCAmelCase_ : Dict = Dataset.from_pandas(A )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) )
if commit_sha is not None:
UpperCAmelCase_ : List[str] = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
UpperCAmelCase_ : int = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , )
def __UpperCAmelCase ( ) -> int:
UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
UpperCAmelCase_ : List[str] = []
for key in pipeline_tasks:
if key not in in_table:
UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt''']
if isinstance(A , (list, tuple) ):
UpperCAmelCase_ : Dict = model[0]
UpperCAmelCase_ : Any = model.__name__
if model not in in_table.values():
missing.append(A )
if len(A ) > 0:
UpperCAmelCase_ : List[Any] = ''', '''.join(A )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 304 | 0 |
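The camel_case_split helper repaired above hinges on two lookaround boundaries; a standalone sketch of the same regex:

import re

# Split before lower->UPPER transitions and before an UPPER that
# starts a new capitalized word, keeping acronyms like "TF" intact.
def camel_case_split(identifier):
    matches = re.finditer(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]
assert camel_case_split("GPT2") == ["GPT2"]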
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : Tuple = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class _a (__magic_name__ ):
'''simple docstring'''
model_type = '''codegen'''
attribute_map = {
    '''max_position_embeddings''': '''n_positions''',
    '''hidden_size''': '''n_embd''',
    '''num_attention_heads''': '''n_head''',
    '''num_hidden_layers''': '''n_layer''',
}
def __init__( self , vocab_size=50400 , n_positions=2048 , n_ctx=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , tie_word_embeddings=False , **kwargs , ):
    self.vocab_size = vocab_size
    self.n_ctx = n_ctx
    self.n_positions = n_positions
    self.n_embd = n_embd
    self.n_layer = n_layer
    self.n_head = n_head
    self.n_inner = n_inner
    self.rotary_dim = rotary_dim
    self.activation_function = activation_function
    self.resid_pdrop = resid_pdrop
    self.embd_pdrop = embd_pdrop
    self.attn_pdrop = attn_pdrop
    self.layer_norm_epsilon = layer_norm_epsilon
    self.initializer_range = initializer_range
    self.use_cache = use_cache
    self.bos_token_id = bos_token_id
    self.eos_token_id = eos_token_id
    super().__init__(
        bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , A__ , A__ = "default" , A__ = None , A__ = False , ):
super().__init__(A__ , task=A__ , patching_specs=A__ , use_past=A__ )
if not getattr(self._config , """pad_token_id""" , A__ ):
# TODO: how to do that better?
A__ : Tuple = 0
@property
def __A ( self ):
A__ : Optional[int] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(A__ , direction="""inputs""" )
A__ : List[str] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
A__ : Optional[int] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __A ( self ):
return self._config.n_layer
@property
def __A ( self ):
return self._config.n_head
def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
    common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
        tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
    # We need to order the input in the way they appears in the forward()
    ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
    # Need to add the past_keys
    if self.use_past:
        if not is_torch_available():
            raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
        else:
            import torch
            batch , seqlen = common_inputs["""input_ids"""].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            past_shape = (
                batch,
                self.num_attention_heads,
                past_key_values_length,
                self._config.hidden_size // self.num_attention_heads,
            )
            ordered_inputs["""past_key_values"""] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
            ]
    ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
    if self.use_past:
        mask_dtype = ordered_inputs["""attention_mask"""].dtype
        ordered_inputs["""attention_mask"""] = torch.cat(
            [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
    return ordered_inputs
@property
def __A ( self ):
return 13
| 141 |
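The cache tensors built in generate_dummy_inputs above follow the standard (batch, heads, past_length, head_dim) layout. A sketch with made-up sizes, assuming PyTorch is installed:

import torch

# One cached key/value pair per layer; head_dim = hidden // heads.
batch, heads, seqlen, hidden = 2, 16, 5, 1024
past_shape = (batch, heads, seqlen + 2, hidden // heads)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(4)]
assert past_key_values[0][0].shape == (2, 16, 7, 64)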
from __future__ import annotations
def fractional_knapsack(value: list[int] , weight: list[int] , capacity: int ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 141 | 1 |
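A worked instance for the repaired fractional_knapsack above: with capacity 25, the greedy ratio order takes all of item 0 and three quarters of item 1:

# Ratios are 6, 5, 4, so items are considered in index order 0, 1, 2.
value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, 25)
assert max_value == 135.0          # 60 + 100 * (15 / 20)
assert fractions == [1, 0.75, 0]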
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates each small segment of the curve as linear and
        # solves for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1 ) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
def f(x ):
    return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase_ = 10
while i <= 10_00_00:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10 | 8 |
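The rule above is exact for linear integrands, which gives a cheap correctness check independent of step count; a one-line sketch using the repaired trapezoidal_area:

# For f(x) = x on [0, 10] the true area is 50, and the trapezoid
# rule reproduces it exactly even with only 10 steps.
assert abs(trapezoidal_area(lambda x: x, 0, 10, 10) - 50.0) < 1e-9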
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def a ( ):
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=snake_case__ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=snake_case__ , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=snake_case__ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=snake_case__ , default=0 , help='''cuda_id.''' , )
args = parser.parse_args()
return args
def image_grid( imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    w , h = imgs[0].size
    grid = Image.new('''RGB''' , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def a ( snake_case__: Tuple , snake_case__: Union[str, Any]="robotic cat with wings" , snake_case__: Union[str, Any]=7.5 , snake_case__: List[str]=50 , snake_case__: List[Any]=1 , snake_case__: Optional[int]=42 , ):
'''simple docstring'''
lowercase_ = torch.Generator(pipeline.device ).manual_seed(snake_case__ )
lowercase_ = pipeline(
snake_case__ , guidance_scale=snake_case__ , num_inference_steps=snake_case__ , generator=snake_case__ , num_images_per_prompt=snake_case__ , ).images
lowercase_ = int(math.sqrt(snake_case__ ) )
lowercase_ = image_grid(snake_case__ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__a = parse_args()
# Load models and create wrapper for stable diffusion
__a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
__a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
__a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
__a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
__a = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__a = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
__a = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
__a = unet.to(torch.device('cuda', args.cuda_id))
__a = pipeline.to(unet.device)
__a , __a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
__a = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
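# Example invocation (script name and model path are placeholders):
#   python text2images.py -m ./sd-v1-4-int8 -c "robotic cat with wings" -n 4 -s 42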
| 30 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm has a flaky default in TrainingArguments (it depends on the logging level),
        # so it is pinned to False here; the tests below depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 368 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 330 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
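    # From here on, e.g. `from transformers.models.convnext import ConvNextModel`
    # resolves lazily: the torch-dependent modeling module is only imported on
    # first attribute access.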
 | 37 |
"""
Project Euler Problem 191 (Prize Strings): https://projecteuler.net/problem=191
Count the attendance records over a period of days that contain fewer than two
absences in total and never three consecutive late days.
"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
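# With the default 30-day period this evaluates to 1918080160, the commonly
# cited answer to Project Euler problem 191.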
if __name__ == "__main__":
print(solution())
| 213 | 0 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
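# Example: iter_merge_sort([4, 1, 2, 9]) returns [1, 2, 4, 9]; strings work
# too, since merge() only relies on `<=` comparisons between elements.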
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
 | 268 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
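# Usage sketch: `with parallel_backend("spark"): ...` routes any parallel_map
# call made inside the block through joblib's spark backend instead of
# multiprocessing.Pool (this requires the `joblibspark` package).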
| 268 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        """
        Arguments:
            id_ - an id to identify the vertex
        Attributes:
            neighbors - a list of the vertices it is linked to
            edges     - a dict (vertex id: distance) of edge weights
        """
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
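# Minimal usage sketch (1-based indices, matching connect()):
#   graph = [Vertex(i) for i in range(3)]
#   connect(graph, 1, 2, 1); connect(graph, 2, 3, 2); connect(graph, 1, 3, 4)
#   prim(graph, graph[0])  # -> [(2, 1), (3, 2)]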
if __name__ == "__main__":
import doctest
doctest.testmod()
 | 71 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
    get_all_tweets("FirePing32")
 | 210 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
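# Example invocation (paths are placeholders):
#   python convert_wav2vec2_conformer_checkpoint.py --checkpoint_path ./checkpoint_best.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer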
| 298 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, original_image=original_image, generator=generator,
            num_inference_steps=2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, mask_image=mask_image, original_image=original_image,
            generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 298 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 141 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        # Always pad to a fixed sequence length so every candidate can be
        # stacked into one batch tensor.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
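# Illustrative usage (not part of the original file): batch_encode_candidates
# pads every candidate to max_length so that candidates can be stacked into a
# single tensor of shape (batch_size, num_candidates, max_length). Assumes
# network access to the google/realm checkpoints listed above.
if __name__ == "__main__":
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    batch = tokenizer.batch_encode_candidates(
        [["Hello world!", "Nice to meet you!"], ["The sky is blue.", "The grass is green."]],
        max_length=10,
        return_tensors="pt",
    )
    print(batch.input_ids.shape)  # torch.Size([2, 2, 10])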
| 141 | 1 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n    predictions: list of predictions to score. Depending on the SuperGlUE subset:\n        - for \'record\': list of question-answer dictionaries with the following keys:\n            - \'idx\': index of the question as specified by the dataset\n            - \'prediction_text\': the predicted answer text\n        - for \'multirc\': list of question-answer dictionaries with the following keys:\n            - \'idx\': index of the question-answer pair as specified by the dataset\n            - \'prediction\': the predicted answer label\n        - otherwise: list of predicted labels\n    references: list of reference labels. Depending on the SuperGLUE subset:\n        - for \'record\': list of question-answers dictionaries with the following keys:\n            - \'idx\': index of the question as specified by the dataset\n            - \'answers\': list of possible answers\n        - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n    - for \'record\':\n        - \'exact_match\': Exact match between answer and gold answer\n        - \'f1\': F1 score\n    - for \'multirc\':\n        - \'exact_match\': Exact match between answer and gold answer\n        - \'f1_m\': Per-question macro-F1 score\n        - \'f1_a\': Average F1 score over all answers\n    - for \'axb\':\n        \'matthews_correlation\': Matthew Correlation\n    - for \'cb\':\n        - \'accuracy\': Accuracy\n        - \'f1\': F1 score\n    - for all others:\n        - \'accuracy\': Accuracy\nExamples:\n\n    >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n    >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n    >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 1.0, \'f1\': 1.0}\n\n    >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n    >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n    >>> references = [0, 1]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n    >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'matthews_correlation\': 1.0}\n'
def simple_accuracy ( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1 ( preds , labels , f1_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc ( ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = F"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='macro' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types( self ):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute( self , predictions , references ):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='macro' )
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 360 |
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 50 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler ( ode_func: Callable , ya: float , xa: float , x_end: float , step_size: float ):
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k])
        x += step_size
    return y
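# Illustrative check (not in the original file): dy/dx = y with y(0) = 1 has
# the exact solution e**x, and explicit Euler converges with O(step_size)
# global error, so y[-1] approaches e ~ 2.71828 as the step shrinks:
#
#     >>> y = explicit_euler(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)
#     >>> abs(float(y[-1]) - 2.71828) < 0.01
#     True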
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 76 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset ( IterableDataset ):
    def __init__( self , p_stop=0.01 , max_length=1000 ):
        '''simple docstring'''
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        '''simple docstring'''
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester ( unittest.TestCase ):
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        '''simple docstring'''
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
    def test_batch_sampler_shards_with_no_splits( self ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and its number
        # of batches is not a multiple of num_processes.
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
    def test_batch_sampler_shards_with_splits( self ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
    def test_batch_sampler_shards_with_no_splits_no_even( self ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and its number
        # of batches is not a multiple of num_processes.
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
    def test_batch_sampler_shards_with_splits_no_even( self ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
    def test_batch_sampler_with_varying_batch_size( self ):
        '''simple docstring'''
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards( self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
        '''simple docstring'''
        random.seed(seed )
        reference = list(dataset )
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(first_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )
        observed = []
        for idx in range(0 , len(first_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference ) < len(observed ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
    def test_iterable_dataset_shard( self ):
        '''simple docstring'''
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
    def test_skip_batch_sampler( self ):
        '''simple docstring'''
        batch_sampler = BatchSampler(range(16 ) , batch_size=4 , drop_last=False )
        new_batch_sampler = SkipBatchSampler(batch_sampler , 2 )
        self.assertListEqual(list(new_batch_sampler ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_skip_data_loader( self ):
        '''simple docstring'''
        dataloader = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_skip_first_batches( self ):
        '''simple docstring'''
        dataloader = DataLoader(list(range(16 ) ) , batch_size=4 )
        new_dataloader = skip_first_batches(dataloader , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_end_of_dataloader( self ):
        '''simple docstring'''
        dataloader = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
    def test_end_of_dataloader_dispatcher( self ):
        '''simple docstring'''
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
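# Quick illustration (not part of the test suite) of the sharding behaviour
# asserted above: with the default even_batches=True, the last shard wraps
# around to the start of the dataset so both processes see the same number of
# batches (here 22 samples, batch size 3, 2 processes).
if __name__ == "__main__":
    sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
    print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]]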
| 330 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_neox_fast'''] = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox'''] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
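# Note (illustrative, not part of the module): with _LazyModule,
# `import transformers.models.gpt_neox` is cheap; each submodule is imported
# only when one of its attributes is first accessed:
#
#     >>> from transformers.models import gpt_neox
#     >>> gpt_neox.GPTNeoXConfig()   # loads configuration_gpt_neox only
#     >>> gpt_neox.GPTNeoXModel      # now loads the torch-backed modeling file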
| 357 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids( self ):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello" , "World" )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"] , expected_token_type_ids )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration( self ):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base" )
            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding["input_ids"]]
            # fmt: off
            expected_encoding = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequences , decoded_sequences ):
                self.assertEqual(expected , decoded )
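# Side note (illustrative, not part of the test): "\u0120" in the toy vocab is
# the byte-level BPE marker for a leading space, so "lower newer" segments as
# ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] and anything outside the
# table falls back to "[UNK]" (id 19), which is what test_full_tokenizer asserts:
#
#     >>> tokenizer = DebertaTokenizer(vocab_file, merges_file, unk_token="[UNK]")
#     >>> tokenizer.convert_tokens_to_ids(["l", "o", "w", "er"])
#     [0, 1, 2, 15]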
| 36 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor( state ):
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def test_gather( state ):
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 ,state.num_processes**2 + 1 ) )
def test_gather_object( state ):
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, F"""{gathered_obj}, {len(gathered_obj )} != {state.num_processes}"""
    assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def test_broadcast( state ):
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 ,state.num_processes + 1 ) )
def test_pad_across_processes( state ):
    # Give the main process one extra element so the shards have different
    # lengths and padding is actually exercised.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 ,state.num_processes ) ) + [0]
def test_reduce_sum( state ):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor ,"sum" )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor ,truth_tensor ), F"""{reduced_tensor} != {truth_tensor}"""
def test_reduce_mean( state ):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor ,"mean" )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor ,truth_tensor ), F"""{reduced_tensor} != {truth_tensor}"""
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
def main():
    state = PartialState()
    state.print(F"""State: {state}""" )
    state.print("testing gather" )
    test_gather(state )
    state.print("testing gather_object" )
    test_gather_object(state )
    state.print("testing broadcast" )
    test_broadcast(state )
    state.print("testing pad_across_processes" )
    test_pad_across_processes(state )
    state.print("testing reduce_sum" )
    test_reduce_sum(state )
    state.print("testing reduce_mean" )
    test_reduce_mean(state )
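# How a script like this is typically exercised (illustrative; the file name is
# hypothetical): `accelerate launch` starts one process per device, and
# PartialState() reads the rank and device from the environment, e.g.
#
#     accelerate launch --num_processes 2 test_ops.py
#
# With 2 processes, create_tensor yields [1., 2.] on rank 0 and [3., 4.] on
# rank 1, so gather returns [1., 2., 3., 4.] on every process.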
if __name__ == "__main__":
main()
| 268 |
"""simple docstring"""
ROMAN = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int ( roman ):
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman ( number ):
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number ,arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCamelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig ( datasets.BuilderConfig ):
    """simple docstring"""
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet ( datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , '''rb''' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table( self , pa_table )->pa.Table:
        '''simple docstring'''
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        '''simple docstring'''
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'''{file_idx}_{batch_idx}''', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                    raise
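# Illustrative usage (not part of the module): this is the generic builder that
# `load_dataset` dispatches to for plain parquet files; ParquetConfig fields
# such as `columns` and `batch_size` can be forwarded as keyword arguments
# (the path below is hypothetical):
#
#     >>> from datasets import load_dataset
#     >>> ds = load_dataset("parquet", data_files="data/*.parquet",
#     ...                   columns=["id", "text"], batch_size=5_000)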
| 65 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory( _ ):
    return EnvironmentCommand()
def download_command_factory( args ):
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand ( BaseTransformersCLICommand ):
    """simple docstring"""
    @staticmethod
    def register_subcommand( parser ):
        '''simple docstring'''
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            '''--accelerate-config_file''' , default=None , help='''The accelerate config file to use for the default values in the launching script.''' , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args )->None:
        '''simple docstring'''
        self._accelerate_config_file = accelerate_config_file
    def run( self ):
        '''simple docstring'''
        safetensors_version = '''not installed'''
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''' ) is not None:
            import safetensors
            safetensors_version = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
        accelerate_version = '''not installed'''
        accelerate_config = accelerate_config_str = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else F'''\t{accelerate_config}'''
            )
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = '''not installed'''
        tf_cuda_available = '''NA'''
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('''GPU''' ) )
        flax_version = '''not installed'''
        jax_version = '''not installed'''
        jaxlib_version = '''not installed'''
        jax_backend = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            '''`transformers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''Huggingface_hub version''': huggingface_hub.__version__,
            '''Safetensors version''': F'''{safetensors_version}''',
            '''Accelerate version''': F'''{accelerate_version}''',
            '''Accelerate config''': F'''{accelerate_config_str}''',
            '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
            '''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
            '''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
            '''Jax version''': F'''{jax_version}''',
            '''JaxLib version''': F'''{jaxlib_version}''',
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d ):
        '''simple docstring'''
        return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 65 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
    '''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
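# Note (illustrative): the TYPE_CHECKING branch above exists so static
# analyzers and IDEs resolve the real symbols while runtime imports stay lazy:
#
#     from typing import TYPE_CHECKING
#     if TYPE_CHECKING:
#         from transformers import ElectraModel  # seen by the type checker only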
| 298 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path , pytorch_dump_folder_path ):
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=False , output_all_encodings=False , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__UpperCamelCase : str = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__UpperCamelCase : Tuple = os.path.join(get_home_dir() , "models" )
__UpperCamelCase : Union[str, Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
__UpperCamelCase : Union[str, Any] = nlp.model.BERTModel(
snake_case__ , len(snake_case__ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
__UpperCamelCase : int = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(snake_case__ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(snake_case__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(snake_case__ , snake_case__ ):
__UpperCamelCase : Any = hf_param.shape
__UpperCamelCase : List[Any] = to_torch(params[gluon_param] )
__UpperCamelCase : Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
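# Illustrative invocation only -- the script filename and both paths below are
# placeholders, not values defined anywhere in this file:
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch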
| 298 | 1 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
UpperCAmelCase_= TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__UpperCAmelCase )
UpperCAmelCase_= tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(__UpperCAmelCase , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [0, 4, 8, 7] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
UpperCAmelCase_= TransfoXLTokenizer(lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
UpperCAmelCase_= TransfoXLTokenizer(lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
UpperCAmelCase_= TransfoXLTokenizer(lower_case=__UpperCAmelCase )
UpperCAmelCase_= """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
UpperCAmelCase_= [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= len(__UpperCAmelCase )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__UpperCAmelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
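# A minimal usage sketch of the tokenizer exercised above, outside the test class.
# It mirrors test_full_tokenizer: write a one-token-per-line vocab file, then
# tokenize with lower-casing enabled. Kept commented out because this module is a
# test file; the vocab below is illustrative and requires `import tempfile`.
#
# with tempfile.TemporaryDirectory() as tmp:
#     vocab_path = os.path.join(tmp, VOCAB_FILES_NAMES["vocab_file"])
#     with open(vocab_path, "w", encoding="utf-8") as f:
#         f.write("".join(tok + "\n" for tok in ["<unk>", "unwanted", ",", "running"]))
#     tok = TransfoXLTokenizer(vocab_file=vocab_path, lower_case=True)
#     print(tok.tokenize("<unk> UNwanted , running"))  # ['<unk>', 'unwanted', ',', 'running']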
| 277 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
UpperCAmelCase_= BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_= self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCAmelCase_= self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
UpperCAmelCase_= BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= self.prepare_image_inputs()
UpperCAmelCase_= image_processor(__UpperCAmelCase , return_tensors="""np""" )
UpperCAmelCase_= processor(images=__UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= """lower newer"""
UpperCAmelCase_= processor(text=__UpperCAmelCase )
UpperCAmelCase_= tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= """lower newer"""
UpperCAmelCase_= self.prepare_image_inputs()
UpperCAmelCase_= processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_= processor.batch_decode(__UpperCAmelCase )
UpperCAmelCase_= tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= """lower newer"""
UpperCAmelCase_= self.prepare_image_inputs()
UpperCAmelCase_= processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
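# A hedged usage sketch of the processor under test: one call produces both the
# vision features and the tokenized text. The checkpoint is the same tiny model
# used in setUp; everything else here is illustrative, so it is kept commented out.
#
# processor = Blip2Processor(
#     BlipImageProcessor(), GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
# )
# image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
# inputs = processor(text="lower newer", images=image)
# print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']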
| 277 | 1 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
        return Matrix(values).determinant()
    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[Any] ):
return str(self.rows )
def __str__( self : Union[str, Any] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                    "[" + ". ".join([str(value) for value in row]) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
return NotImplemented
return self.rows == other.rows
    def __ne__(self, other: object) -> bool:
return not self == other
def __neg__( self : List[Any] ):
return self * -1
    def __add__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )
    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
        result = self
for _ in range(other - 1 ):
result *= self
return result
    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
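# Small usage demonstration of the Matrix class above, kept under its own
# __main__ guard so it only runs when the file is executed directly. Note that
# scalar multiplication (and therefore inverse()) truncates entries to int.
if __name__ == "__main__":
    demo = Matrix([[1, 2], [3, 4]])
    print(demo.determinant())  # 1*4 - 2*3 = -2
    print(demo.inverse())      # adjugate scaled by 1/determinant, entries truncated
    print((demo * demo).rows)  # [[7, 10], [15, 22]]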
| 53 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> list[tuple[int, int]]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = position
lowerCamelCase__ : Optional[Any] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowerCamelCase__ : Dict = []
for position in positions:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_UpperCAmelCase )
return permissible_positions
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool:
return not any(elem == 0 for row in board for elem in row )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
if is_complete(_UpperCAmelCase ):
return True
for position in get_valid_pos(_UpperCAmelCase , len(_UpperCAmelCase ) ):
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = position
if board[y][x] == 0:
lowerCamelCase__ : List[Any] = curr + 1
if open_knight_tour_helper(_UpperCAmelCase , _UpperCAmelCase , curr + 1 ):
return True
lowerCamelCase__ : Optional[Any] = 0
return False
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> list[list[int]]:
lowerCamelCase__ : Any = [[0 for i in range(_UpperCAmelCase )] for j in range(_UpperCAmelCase )]
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = 1
if open_knight_tour_helper(_UpperCAmelCase , (i, j) , 1 ):
return board
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Any = F"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
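# Usage demonstration: solve a 5x5 open knight's tour and print the order in which
# each square is visited (plain backtracking, so this may take a moment).
if __name__ == "__main__":
    for solved_row in open_knight_tour(5):
        print(solved_row)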
| 50 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _snake_case ( A , A , A ) -> tuple:
lowerCAmelCase__ = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
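# Worked example: passing voltage=0 asks the function to solve P = V * I for the
# missing voltage, giving V = P / I = 5 / 2 = 2.5.
if __name__ == "__main__":
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)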
if __name__ == "__main__":
import doctest
    doctest.testmod()

| 228 |
'''simple docstring'''
def _snake_case ( A = 10 ) -> str:
if not isinstance(A , A ) or n < 0:
raise ValueError('''Invalid input''' )
lowerCAmelCase__ = 10**n
lowerCAmelCase__ = 28433 * (pow(2 , 7830457 , A )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""") | 228 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
def __add__( self : Dict , lowerCAmelCase_ : Optional[int] ) -> Dict:
if self.degree > polynomial_a.degree:
UpperCAmelCase_ : Optional[Any] = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , __a )
else:
UpperCAmelCase_ : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , __a )
def __sub__( self : Dict , lowerCAmelCase_ : Dict ) -> List[Any]:
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Dict ) -> Union[str, Any]:
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , __a )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = ""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__a )
return polynomial
def __repr__( self : Tuple ) -> Union[str, Any]:
return self.__str__()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : list[float] = [0] * self.degree
for i in range(self.degree ):
UpperCAmelCase_ : Optional[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , __a )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Any = 0 ) -> List[Any]:
UpperCAmelCase_ : list[float] = [0] * (self.degree + 2)
UpperCAmelCase_ : Dict = constant
for i in range(self.degree + 1 ):
UpperCAmelCase_ : Tuple = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , __a )
def __eq__( self : int , lowerCAmelCase_ : Tuple ) -> Optional[Any]:
if not isinstance(__a , __a ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> Tuple:
return not self.__eq__(__a )
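# Usage demonstration of the Polynomial class above. Coefficients are ordered from
# the constant term upward, so Polynomial(2, [1, 2, 3]) is 3x^2 + 2x + 1.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])
    print(p.evaluate(2))           # 3*4 + 2*2 + 1 = 17
    print(p.derivative())          # 6x + 2
    print(p.integral(constant=0))  # 1.0x^3 + 1.0x^2 + 1.0x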
| 268 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
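# Hedged usage sketch: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the
# property above multiplies out to 320, i.e. the feature encoder emits one frame
# per 320 input samples. Kept commented out since this module only defines the
# config class.
#
# config = Wav2Vec2Config()
# print(config.inputs_to_logits_ratio)  # 320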
| 36 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    # Advance the paired recurrences until the bound is passed, then report the
    # corresponding disc count.
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( __A, __A, __A=None ) -> List[str]:
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
UpperCAmelCase__ = nn.Parameter(__A )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
UpperCAmelCase__ = nn.Parameter(__A )
def lowerCAmelCase_ ( __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = np.asarray(weights[0] )
UpperCAmelCase__ = np.asarray(weights[1] )
UpperCAmelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key, torch.tensor(__A ).transpose(1, 2 ).contiguous().view(-1, __A ), )
set_param(
torch_layer.self_attention.value, torch.tensor(__A ).transpose(1, 2 ).contiguous().view(-1, __A ), )
set_param(
torch_layer.output.dense, torch.tensor(__A ).view(-1, __A ).contiguous().transpose(0, 1 ), )
def lowerCAmelCase_ ( __A, __A, __A ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = np.asarray(weights[0] )
UpperCAmelCase__ = np.asarray(weights[1] )
UpperCAmelCase__ = np.asarray(weights[2] )
UpperCAmelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query, torch.tensor(__A ).transpose(1, 2 ).contiguous().view(-1, __A ), )
set_param(
torch_layer.self_attention.key, torch.tensor(__A ).transpose(1, 2 ).contiguous().view(-1, __A ), )
set_param(
torch_layer.self_attention.value, torch.tensor(__A ).transpose(1, 2 ).contiguous().view(-1, __A ), )
set_param(
torch_layer.output.dense, torch.tensor(__A ).view(-1, __A ).contiguous().transpose(0, 1 ), )
def lowerCAmelCase_ ( __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = weights[0][0][0]
UpperCAmelCase__ = np.asarray(layer_norm_a[0] )
UpperCAmelCase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm, torch.tensor(__A ), torch.tensor(__A ), )
# lsh weights + output
UpperCAmelCase__ = weights[0][1]
if len(__A ) < 4:
set_layer_weights_in_torch_lsh(__A, torch_block.attention, __A )
else:
set_layer_weights_in_torch_local(__A, torch_block.attention, __A )
# intermediate weighs
UpperCAmelCase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(__A ) == 4:
UpperCAmelCase__ = intermediate_weights[2]
# layernorm 2
UpperCAmelCase__ = np.asarray(intermediate_weights[0][0] )
UpperCAmelCase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm, torch.tensor(__A ), torch.tensor(__A ), )
# intermediate dense
UpperCAmelCase__ = np.asarray(intermediate_weights[1][0] )
UpperCAmelCase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense, torch.tensor(__A ).transpose(0, 1 ).contiguous(), torch.tensor(__A ), )
# intermediate out
UpperCAmelCase__ = np.asarray(intermediate_weights[4][0] )
UpperCAmelCase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense, torch.tensor(__A ).transpose(0, 1 ).contiguous(), torch.tensor(__A ), )
def lowerCAmelCase_ ( __A, __A, __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = torch_model.reformer
# word embeds
UpperCAmelCase__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings, torch.tensor(__A ), )
if isinstance(weights[3], __A ):
UpperCAmelCase__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
UpperCAmelCase__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
UpperCAmelCase__ = nn.Parameter(torch.tensor(__A ) )
UpperCAmelCase__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__A ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
UpperCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__A, __A, __A )
# output layer norm
UpperCAmelCase__ = np.asarray(weights[7][0] )
UpperCAmelCase__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm, torch.tensor(__A ), torch.tensor(__A ), )
# output embeddings
UpperCAmelCase__ = np.asarray(weights[9][0] )
UpperCAmelCase__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder, torch.tensor(__A ).transpose(0, 1 ).contiguous(), torch.tensor(__A ), )
def lowerCAmelCase_ ( __A, __A, __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = ReformerConfig.from_json_file(__A )
print(f"""Building PyTorch model from configuration: {config}""" )
UpperCAmelCase__ = ReformerModelWithLMHead(__A )
with open(__A, "rb" ) as f:
UpperCAmelCase__ = pickle.load(__A )["weights"]
set_model_weights_in_torch(__A, __A, config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict(), __A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
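# Illustrative invocation only -- the script filename and the three paths are
# placeholders, not values defined in this file:
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin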
| 65 |

import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCAmelCase_ ( __A, __A=False ) -> Any:
'''simple docstring'''
try:
UpperCAmelCase__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase__ = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase__ = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
UpperCAmelCase__ = unittest.skip("test requires faiss" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
UpperCAmelCase__ = unittest.skip("test requires regex" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> List[str]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
UpperCAmelCase__ = unittest.skip("test requires elasticsearch" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
UpperCAmelCase__ = unittest.skip("test requires sqlalchemy" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
UpperCAmelCase__ = unittest.skip("test requires PyTorch" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Union[str, Any]:
'''simple docstring'''
if not config.TF_AVAILABLE:
UpperCAmelCase__ = unittest.skip("test requires TensorFlow" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
UpperCAmelCase__ = unittest.skip("test requires JAX" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> int:
'''simple docstring'''
if not config.PIL_AVAILABLE:
UpperCAmelCase__ = unittest.skip("test requires Pillow" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Tuple:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Dict:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
def _require_spacy_model(__A ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip("test requires spacy" )(__A )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Tuple:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
UpperCAmelCase__ = unittest.skip("test is slow" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> List[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
UpperCAmelCase__ = unittest.skip("test is local" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCAmelCase__ = unittest.skip("test is packaged" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
UpperCAmelCase__ = unittest.skip("test requires remote" )(__A )
return test_case
def lowerCAmelCase_ ( *__A ) -> Optional[int]:
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("test" ):
for decorator in decorators:
UpperCAmelCase__ = decorator(__A )
setattr(cls, __A, __A )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def lowerCAmelCase_ ( __A=OfflineSimulationMode.CONNECTION_FAILS, __A=1e-16 ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ = requests.Session().request
def timeout_request(__A, __A, __A, **__A ):
# Change the url to an invalid url so that the connection hangs
UpperCAmelCase__ = "https://10.255.255.1"
if kwargs.get("timeout" ) is None:
raise RequestWouldHangIndefinitelyError(
f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
UpperCAmelCase__ = timeout
try:
return online_request(__A, __A, **__A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCAmelCase__ = url
UpperCAmelCase__ = e.args[0]
UpperCAmelCase__ = (max_retry_error.args[0].replace("10.255.255.1", f"""OfflineMock[{url}]""" ),)
UpperCAmelCase__ = (max_retry_error,)
raise
def raise_connection_error(__A, __A, **__A ):
raise requests.ConnectionError("Offline mode is enabled.", request=__A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send", __A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request", __A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE", __A ):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def lowerCAmelCase_ ( *__A, **__A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__A, **__A ) as tmp_dir:
try:
os.chdir(__A )
yield
finally:
os.chdir(__A )
@contextmanager
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
import gc
gc.collect()
UpperCAmelCase__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
import gc
gc.collect()
UpperCAmelCase__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCAmelCase_ ( __A, __A ) -> List[str]:
'''simple docstring'''
return deepcopy(__A ).integers(0, 100, 10 ).tolist() == deepcopy(__A ).integers(0, 100, 10 ).tolist()
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(__A, *__A, **__A ):
try:
return func(*__A, **__A )
except HTTPError as err:
if str(__A ).startswith("500" ) or str(__A ).startswith("502" ):
pytest.xfail(str(__A ) )
raise err
return decorator.decorator(_wrapper, __A )
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def lowerCAmelCase_ ( __A, __A ) -> Optional[int]:
'''simple docstring'''
while True:
UpperCAmelCase__ = await stream.readline()
if line:
callback(__A )
else:
break
async def lowerCAmelCase_ ( __A, __A=None, __A=None, __A=None, __A=False, __A=False ) -> _RunOutput:
'''simple docstring'''
if echo:
print("\nRunning: ", " ".join(__A ) )
UpperCAmelCase__ = await asyncio.create_subprocess_exec(
cmd[0], *cmd[1:], stdin=__A, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=__A, )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def tee(__A, __A, __A, __A="" ):
UpperCAmelCase__ = line.decode("utf-8" ).rstrip()
sink.append(__A )
if not quiet:
print(__A, __A, file=__A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda __A : tee(__A, __A, sys.stdout, label="stdout:" ) ),
_read_stream(p.stderr, lambda __A : tee(__A, __A, sys.stderr, label="stderr:" ) ),
], timeout=__A, )
return _RunOutput(await p.wait(), __A, __A )
def lowerCAmelCase_ ( __A, __A=None, __A=None, __A=180, __A=False, __A=True ) -> _RunOutput:
'''simple docstring'''
UpperCAmelCase__ = asyncio.get_event_loop()
UpperCAmelCase__ = loop.run_until_complete(
_stream_subprocess(__A, env=__A, stdin=__A, timeout=__A, quiet=__A, echo=__A ) )
UpperCAmelCase__ = " ".join(__A )
if result.returncode > 0:
UpperCAmelCase__ = "\n".join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
return result
def lowerCAmelCase_ ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = os.environ.get("PYTEST_XDIST_WORKER", "gw0" )
UpperCAmelCase__ = re.sub(r"^gw", "", __A, 0, re.M )
return int(__A )
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = 29_500
UpperCAmelCase__ = pytest_xdist_worker_id()
return port + uniq_delta
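# Hedged usage sketch of the offline() context manager defined above, as it would
# appear in a test (the URL is illustrative). Kept commented out because this
# module only provides test utilities.
#
# def test_fails_without_network():
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         with pytest.raises(requests.ConnectionError):
#             requests.get("https://huggingface.co")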
| 65 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_=BITS )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = x.device
_UpperCAmelCase : Optional[int] = (x > 0).int()
_UpperCAmelCase : Any = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCAmelCase_ , dtype=torch.intaa )
_UpperCAmelCase : Tuple = rearrange(lowerCAmelCase_ , """d -> d 1 1""" )
_UpperCAmelCase : Dict = rearrange(lowerCAmelCase_ , """b (c d) h w -> b c d h w""" , d=8 )
_UpperCAmelCase : Optional[Any] = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
return (dec / 255).clamp(0.0 , 1.0 )
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def snake_case_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="epsilon" , lowerCAmelCase_=None , lowerCAmelCase_ = True , )-> Union[DDPMSchedulerOutput, Tuple]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_UpperCAmelCase ,_UpperCAmelCase : Any = torch.split(lowerCAmelCase_ , sample.shape[1] , dim=1 )
else:
_UpperCAmelCase : List[Any] = None
# 1. compute alphas, betas
_UpperCAmelCase : Union[str, Any] = self.alphas_cumprod[t]
_UpperCAmelCase : int = self.alphas_cumprod[t - 1] if t > 0 else self.one
_UpperCAmelCase : Tuple = 1 - alpha_prod_t
_UpperCAmelCase : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_UpperCAmelCase : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_UpperCAmelCase : str = model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
_UpperCAmelCase : int = self.bit_scale
if self.config.clip_sample:
_UpperCAmelCase : Any = torch.clamp(lowerCAmelCase_ , -scale , lowerCAmelCase_ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCAmelCase : Any = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_UpperCAmelCase : int = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCAmelCase : Any = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_UpperCAmelCase : List[str] = 0
if t > 0:
_UpperCAmelCase : List[Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowerCAmelCase_ ).to(model_output.device )
_UpperCAmelCase : Tuple = (self._get_variance(lowerCAmelCase_ , predicted_variance=lowerCAmelCase_ ) ** 0.5) * noise
_UpperCAmelCase : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCAmelCase_ , pred_original_sample=lowerCAmelCase_ )
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet, scheduler, bit_scale=1.0):
        super().__init__()
        self.bit_scale = bit_scale
        # the step functions above read `self.bit_scale` with `self` bound to the
        # scheduler, so the scheduler needs the attribute as well
        scheduler.bit_scale = bit_scale
        # assign on the class so the replacement step functions bind `self` to the scheduler
        type(scheduler).step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
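
# Hypothetical usage sketch (names illustrative; `unet` must be a UNet trained
# on the bit representation):
#
#   scheduler = DDIMScheduler()
#   pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]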
| 349 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    """
    Test cases for greedy knapsack
    """

    def test_sorted(self):
        """
        kp.calc_profit takes (profit, weight, max_weight) and should return the
        expected maximal profit.
        """
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """Returns ValueError for any negative max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """Returns ValueError for any negative weight value in the list."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """Returns ValueError for any negative profit value in the list."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """Returns ValueError for a zero max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Returns IndexError when the profit and weight lists differ in length."""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 349 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    """Configuration class for a data2vec-text model."""

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
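
# Hypothetical usage sketch (mirrors the standard config / OnnxConfig workflow;
# the task string is one of the stock OnnxConfig tasks):
#
#   config = Data2VecTextConfig(hidden_size=768, num_attention_heads=12)
#   onnx_config = Data2VecTextOnnxConfig(config, task="multiple-choice")
#   print(onnx_config.inputs)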
| 277 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 127 | 0 |
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
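
# For this sample graph the articulation points are 2, 3 and 5: removing any of
# them disconnects at least one vertex from the rest of the graph.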
| 228 |
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal string to its binary equivalent, returned as a plain
    Python int whose decimal digits are the binary digits.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 228 | 1 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check the first digits against the accepted issuer prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Validate a credit card number with the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit

    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])

    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return whether the credit card number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 148 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return number + 2 if number and number + 2 form a twin prime pair,
    otherwise return -1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
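
# Examples: twin_prime(3) == 5 (3 and 5 are twin primes), twin_prime(4) == -1.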
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 |
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Encode a plaintext message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode a Morse-code message back into plaintext."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)

    message = encrypt(message)
    print(message)

    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 5 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot element from the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the kth smallest element (1-indexed) of lst.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
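
# Quickselect runs in O(n) expected time (O(n^2) worst case with unlucky pivots).
# Example: kth_number([2, 1, 3, 4, 5], 3) == 3. Note that the partition drops
# elements equal to the pivot, so the input is assumed to contain distinct values.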
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149 | """simple docstring"""
import os
def lowerCAmelCase__ ( _UpperCamelCase : str = "matrix.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(_UpperCamelCase ) , _UpperCamelCase ) ) as in_file:
snake_case = in_file.read()
snake_case = [[int(_UpperCamelCase ) for cell in row.split(',' )] for row in data.strip().splitlines()]
snake_case = [[0 for cell in row] for row in grid]
snake_case = len(grid[0] )
snake_case = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )]
snake_case = grid[0][0]
for i in range(1 , _UpperCamelCase ):
snake_case = grid[0][i] + dp[0][i - 1]
for i in range(1 , _UpperCamelCase ):
snake_case = grid[i][0] + dp[i - 1][0]
for i in range(1 , _UpperCamelCase ):
for j in range(1 , _UpperCamelCase ):
snake_case = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
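
# dp[i][j] holds the minimal path sum from (0, 0) to (i, j); since each cell is
# entered from above or from the left, dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1]).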
if __name__ == "__main__":
print(f"""{solution() = }""")
| 149 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the DDIM update in bit space."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding

    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def _lowercase ( self ,__A ,__A ,__A ,__A="epsilon" ,__A=None ,__A = True ,):
'''simple docstring'''
__UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__UpperCamelCase , __UpperCamelCase = torch.split(__A ,sample.shape[1] ,dim=1 )
else:
__UpperCamelCase = None
# 1. compute alphas, betas
__UpperCamelCase = self.alphas_cumprod[t]
__UpperCamelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__UpperCamelCase = 1 - alpha_prod_t
__UpperCamelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__UpperCamelCase = model_output
else:
raise ValueError(f"Unsupported prediction_type {prediction_type}." )
# 3. Clip "predicted x_0"
__UpperCamelCase = self.bit_scale
if self.config.clip_sample:
__UpperCamelCase = torch.clamp(__A ,-scale ,__A )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCamelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__UpperCamelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__UpperCamelCase = 0
if t > 0:
__UpperCamelCase = torch.randn(
model_output.size() ,dtype=model_output.dtype ,layout=model_output.layout ,generator=__A ).to(model_output.device )
__UpperCamelCase = (self._get_variance(__A ,predicted_variance=__A ) ** 0.5) * noise
__UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__A ,pred_original_sample=__A )
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # the step functions above read `self.bit_scale` with `self` bound to the
        # scheduler, so the scheduler needs the attribute as well
        scheduler.bit_scale = bit_scale
        # assign on the class so the replacement step functions bind `self` to the scheduler
        type(scheduler).step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 349 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of the Ray API: retrieval calls are
    delegated to a pool of RayRetriever actors so the index is loaded once per worker.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
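
# Hypothetical wiring sketch (assumes ray.init() was called; mirrors the RAG
# fine-tuning example, which wraps RayRetriever as a Ray actor):
#
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()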
| 349 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 296 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 296 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract the started/completed timestamps and the duration (in minutes) of one job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 127 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    """
    Count the perimeters up to `limit` that admit exactly one integer-sided
    right triangle (Project Euler 75).
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
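
# Euclid's formula generates each primitive triple (m > n, coprime, m - n odd)
# exactly once with perimeter 2m(m + n); stepping through its multiples counts
# the non-primitive triples as well.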
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCamelCase_ = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 174 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: return the list of primes up to and including num.
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
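
# The sieve runs in O(n log log n) time and O(n) memory, the classic bounds for
# the Sieve of Eratosthenes.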
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 148 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    Variance-preserving (VP) SDE scheduler from the score-based generative
    modeling paper (https://arxiv.org/abs/2011.13456).
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
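
# Minimal sketch of the predictor loop (assumes `score_model(x, t)` returns the
# learned score; names are illustrative):
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(1000)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t)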
| 148 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """
    Get the image and annotation lists from the input dir, build mosaic images
    and annotations, and save them in the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 193 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(_SCREAMING_SNAKE_CASE ):
if not isinstance(_SCREAMING_SNAKE_CASE , (Dataset, IterableDataset) ):
if isinstance(_SCREAMING_SNAKE_CASE , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(_SCREAMING_SNAKE_CASE )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_SCREAMING_SNAKE_CASE ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_SCREAMING_SNAKE_CASE ).__name__}.""" )
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = (
(Dataset, IterableDataset) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else (IterableDataset, Dataset)
)
elif not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , info=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , stopping_strategy=_SCREAMING_SNAKE_CASE )
else:
return _interleave_iterable_datasets(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , info=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , stopping_strategy=_SCREAMING_SNAKE_CASE )
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(_SCREAMING_SNAKE_CASE ):
if not isinstance(_SCREAMING_SNAKE_CASE , (Dataset, IterableDataset) ):
if isinstance(_SCREAMING_SNAKE_CASE , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(_SCREAMING_SNAKE_CASE )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_SCREAMING_SNAKE_CASE ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_SCREAMING_SNAKE_CASE ).__name__}.""" )
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = (
(Dataset, IterableDataset) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else (IterableDataset, Dataset)
)
elif not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_SCREAMING_SNAKE_CASE , info=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , axis=_SCREAMING_SNAKE_CASE )
else:
return _concatenate_iterable_datasets(_SCREAMING_SNAKE_CASE , info=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , axis=_SCREAMING_SNAKE_CASE )
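# Minimal usage sketch (my addition, via the public `datasets` API these helpers
# implement): interleaving alternates examples across sources round-robin by
# default, while concatenation simply appends one dataset after the other.
#
#   from datasets import Dataset, interleave_datasets, concatenate_datasets
#   d1 = Dataset.from_dict({"text": ["a", "b"]})
#   d2 = Dataset.from_dict({"text": ["c", "d"]})
#   interleave_datasets([d1, d2])["text"]   # ['a', 'c', 'b', 'd']
#   concatenate_datasets([d1, d2])["text"]  # ['a', 'b', 'c', 'd']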
| 193 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits = 3):
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10_000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
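# Note (my addition): for the all-|0> register built above, the QFT output is a
# uniform superposition, so the 10_000 shots should spread roughly evenly over
# all 2**number_of_qubits basis states (about 1_250 counts each for 3 qubits).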
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 149 |
def actual_power(a, b):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a, b):
    # int(b / 2) truncates toward zero, so actual_power(a, b) already returns
    # a ** abs(b) for negative b; invert it to get the negative exponent.
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
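# Quick sanity checks (my addition). Note the recursion halves the exponent but
# evaluates both halves separately, so it still performs O(|b|) multiplications;
# caching actual_power(a, int(b / 2)) in a local would make it O(log |b|).
assert power(2, 10) == 1024
assert power(2, -3) == 0.125
assert power(-2, -3) == -0.125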
if __name__ == "__main__":
print(power(-2, -3))
| 149 | 1 |
"""simple docstring"""
import argparse
import os
import re
UpperCAmelCase = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
UpperCAmelCase = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":` and puts `key` in group 0.
UpperCAmelCase = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCAmelCase = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCAmelCase = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCAmelCase = re.compile(r"""\[([^\]]+)\]""")
def lowercase ( a__ : Dict ) -> List[Any]:
_UpperCamelCase = _re_indent.search(a__ )
return "" if search is None else search.groups()[0]
def lowercase ( a__ : int , a__ : List[str]="" , a__ : int=None , a__ : Any=None ) -> Optional[int]:
_UpperCamelCase = 0
_UpperCamelCase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(a__ ):
index += 1
_UpperCamelCase = ['''\n'''.join(lines[:index] )]
else:
_UpperCamelCase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_UpperCamelCase = [lines[index]]
index += 1
while index < len(a__ ) and (end_prompt is None or not lines[index].startswith(a__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(a__ ) )
if index < len(a__ ) - 1:
_UpperCamelCase = [lines[index + 1]]
index += 1
else:
_UpperCamelCase = []
else:
blocks.append('''\n'''.join(a__ ) )
_UpperCamelCase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a__ ) > 0:
blocks.append('''\n'''.join(a__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def lowercase ( a__ : Union[str, Any] ) -> Tuple:
def _inner(a__ : str ):
return key(a__ ).lower().replace('''_''' , '''''' )
return _inner
def lowercase ( a__ : Optional[Any] , a__ : Dict=None ) -> int:
    def noop(a__ : Optional[int] ):
        return a__
if key is None:
_UpperCamelCase = noop
# Constants are all uppercase, they go first.
_UpperCamelCase = [obj for obj in objects if key(a__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_UpperCamelCase = [obj for obj in objects if key(a__ )[0].isupper() and not key(a__ ).isupper()]
# Functions begin with a lowercase, they go last.
_UpperCamelCase = [obj for obj in objects if not key(a__ )[0].isupper()]
_UpperCamelCase = ignore_underscore(a__ )
return sorted(a__ , key=a__ ) + sorted(a__ , key=a__ ) + sorted(a__ , key=a__ )
def lowercase ( a__ : List[str] ) -> Any:
def _replace(a__ : List[str] ):
_UpperCamelCase = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
_UpperCamelCase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_UpperCamelCase = keys[:-1]
return "[" + ", ".join([F'''\"{k}\"''' for k in sort_objects(a__ )] ) + "]"
_UpperCamelCase = import_statement.split('''\n''' )
if len(a__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_UpperCamelCase = 2 if lines[1].strip() == '''[''' else 1
_UpperCamelCase = [(i, _re_strip_line.search(a__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        _UpperCamelCase = sort_objects(a__ , key=lambda a__ : a__[1] )
_UpperCamelCase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(a__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_UpperCamelCase = _re_bracket_content.sub(_replace , lines[1] )
else:
_UpperCamelCase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_UpperCamelCase = keys[:-1]
_UpperCamelCase = get_indent(lines[1] ) + ''', '''.join([F'''\"{k}\"''' for k in sort_objects(a__ )] )
return "\n".join(a__ )
else:
# Finally we have to deal with imports fitting on one line
_UpperCamelCase = _re_bracket_content.sub(_replace , a__ )
return import_statement
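# Example (my addition): given the one-line statement
#   _import_structure["models"] = ["Zeta", "alpha", "BETA"]
# the sorter above rewrites the bracketed list as ["BETA", "Zeta", "alpha"]:
# uppercase constants first, then classes, then lowercase functions, each group
# alphabetical with leading underscores ignored by the sort key.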
def lowercase ( a__ : Optional[Any] , a__ : List[Any]=True ) -> Optional[int]:
with open(a__ , '''r''' ) as f:
_UpperCamelCase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_UpperCamelCase = split_code_in_indented_blocks(
a__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(a__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_UpperCamelCase = main_blocks[block_idx]
_UpperCamelCase = block.split('''\n''' )
# Get to the start of the imports.
_UpperCamelCase = 0
while line_idx < len(a__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_UpperCamelCase = len(a__ )
else:
line_idx += 1
if line_idx >= len(a__ ):
continue
# Ignore beginning and last line: they don't contain anything.
_UpperCamelCase = '''\n'''.join(block_lines[line_idx:-1] )
_UpperCamelCase = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_UpperCamelCase = split_code_in_indented_blocks(a__ , indent_level=a__ )
# We have two categories of import key: list or _import_structure[key].append/extend
_UpperCamelCase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_UpperCamelCase = [(pattern.search(a__ ).groups()[0] if pattern.search(a__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_UpperCamelCase = [(i, key) for i, key in enumerate(a__ ) if key is not None]
        _UpperCamelCase = [x[0] for x in sorted(a__ , key=lambda a__ : a__[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_UpperCamelCase = 0
_UpperCamelCase = []
for i in range(len(a__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_UpperCamelCase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(a__ )
count += 1
# And we put our main block back together with its first and last line.
_UpperCamelCase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(a__ ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(a__ , '''w''' ) as f:
f.write('''\n'''.join(a__ ) )
def lowercase ( a__ : str=True ) -> Dict:
_UpperCamelCase = []
for root, _, files in os.walk(a__ ):
if "__init__.py" in files:
_UpperCamelCase = sort_imports(os.path.join(a__ , '''__init__.py''' ) , check_only=a__ )
if result:
_UpperCamelCase = [os.path.join(a__ , '''__init__.py''' )]
if len(a__ ) > 0:
raise ValueError(F'''Would overwrite {len(a__ )} files, run `make style`.''' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCAmelCase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 369 | """simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
snake_case__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] ) -> int:
_UpperCamelCase = TextaTextGenerationPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase )
return generator, ["Something to write", "Something else"]
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) -> Union[str, Any]:
_UpperCamelCase = generator('''Something there''' )
self.assertEqual(__UpperCamelCase , [{'''generated_text''': ANY(__UpperCamelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
_UpperCamelCase = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
] , )
_UpperCamelCase = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
] , )
with self.assertRaises(__UpperCamelCase ):
generator(4 )
@require_torch
def _UpperCamelCase ( self : List[str] ) -> List[str]:
_UpperCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
_UpperCamelCase = generator('''Something there''' , do_sample=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [{'''generated_text''': ''''''}] )
_UpperCamelCase = 3
_UpperCamelCase = generator(
'''Something there''' , num_return_sequences=__UpperCamelCase , num_beams=__UpperCamelCase , )
_UpperCamelCase = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = generator('''This is a test''' , do_sample=__UpperCamelCase , num_return_sequences=2 , return_tensors=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
_UpperCamelCase = generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCamelCase , )
self.assertEqual(
__UpperCamelCase , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
_UpperCamelCase = generator('''Something there''' , do_sample=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [{'''generated_text''': ''''''}] )
| 54 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self : Tuple ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int]=14 ,lowerCamelCase__ : str=7 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Any=99 ,lowerCamelCase__ : Tuple=32 ,lowerCamelCase__ : Optional[Any]=4 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : str=4 ,lowerCamelCase__ : List[Any]=37 ,lowerCamelCase__ : Optional[int]="gelu" ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : List[Any]=512 ,lowerCamelCase__ : List[str]=0.02 ,) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = rotary_dim
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = vocab_size - 1
SCREAMING_SNAKE_CASE = vocab_size - 1
SCREAMING_SNAKE_CASE = vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = GPTJConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,use_cache=lowerCamelCase__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,rotary_dim=self.rotary_dim ,)
return (config, input_ids, input_mask)
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 20
SCREAMING_SNAKE_CASE = model_class_name(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = model.init_cache(input_ids.shape[0] ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = jnp.ones((input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE = model(
input_ids[:, :-1] ,attention_mask=lowerCamelCase__ ,past_key_values=lowerCamelCase__ ,position_ids=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype="""i4""" )
SCREAMING_SNAKE_CASE = model(
input_ids[:, -1:] ,attention_mask=lowerCamelCase__ ,past_key_values=outputs_cache.past_key_values ,position_ids=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F"""Max diff is {diff}""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 20
SCREAMING_SNAKE_CASE = model_class_name(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] ,axis=-1 ,)
SCREAMING_SNAKE_CASE = model.init_cache(input_ids.shape[0] ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE = model(
input_ids[:, :-1] ,attention_mask=lowerCamelCase__ ,past_key_values=lowerCamelCase__ ,position_ids=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype="""i4""" )
SCREAMING_SNAKE_CASE = model(
input_ids[:, -1:] ,past_key_values=outputs_cache.past_key_values ,attention_mask=lowerCamelCase__ ,position_ids=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F"""Max diff is {diff}""" )
@require_flax
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : List[Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__snake_case : List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxGPTJModelTester(self )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
@tooslow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained("""gpt2""" ,pad_token="""<|endoftext|>""" ,padding_side="""left""" )
SCREAMING_SNAKE_CASE = tokenizer(["""Hello this is a long string""", """Hey"""] ,return_tensors="""np""" ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = model.config.eos_token_id
SCREAMING_SNAKE_CASE = jax.jit(model.generate )
SCREAMING_SNAKE_CASE = jit_generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE = getattr(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = pt_model_class(lowerCamelCase__ ).eval()
SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ,dtype=jnp.floataa )
SCREAMING_SNAKE_CASE = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE = pt_model(**lowerCamelCase__ ).to_tuple()
SCREAMING_SNAKE_CASE = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ,"""Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = model_class.from_pretrained(lowerCamelCase__ ,from_pt=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ,"""Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE = getattr(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pt_model_class(lowerCamelCase__ ).eval()
SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ,dtype=jnp.floataa )
SCREAMING_SNAKE_CASE = load_flax_weights_in_pytorch_model(lowerCamelCase__ ,fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE = pt_model(**lowerCamelCase__ ).to_tuple()
SCREAMING_SNAKE_CASE = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ,"""Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pt_model_class.from_pretrained(lowerCamelCase__ ,from_flax=lowerCamelCase__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ,"""Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
@tooslow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 296 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
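# Note (my addition): swapping sys.modules[__name__] for a _LazyModule keeps
# `import transformers.models.llama` cheap; the torch-backed submodules above are
# only imported on first attribute access (e.g. touching LlamaForCausalLM).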
| 296 | 1 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution(n = 1000):
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
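# Sanity check (my addition): the first Fibonacci term with three digits is
# F(12) = 144, so the helper should return 12 for n = 3.
assert solution(3) == 12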
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 362 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (IPNDMScheduler,)
lowerCAmelCase = (('''num_inference_steps''', 50),)
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Tuple = {'''num_train_timesteps''': 1_000}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def a__ ( self ,_SCREAMING_SNAKE_CASE=0 ,**_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : int = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : Optional[Any] = 0.1 * sample
UpperCAmelCase_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Tuple = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase_ : Optional[int] = dummy_past_residuals[:]
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Optional[int] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ,_SCREAMING_SNAKE_CASE=0 ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : List[str] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Tuple = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = self.dummy_sample
UpperCAmelCase_ : Tuple = 0.1 * sample
UpperCAmelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : str = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : Tuple = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : Dict = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : str = dummy_past_residuals[:]
UpperCAmelCase_ : Tuple = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Optional[int] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : str = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : List[str] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = 10
UpperCAmelCase_ : Tuple = self.dummy_model()
UpperCAmelCase_ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : int = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
return sample
def a__ ( self ) -> str:
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Any = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : int = self.get_scheduler_config()
UpperCAmelCase_ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE ,'''set_timesteps''' ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE ,'''set_timesteps''' ):
UpperCAmelCase_ : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[Any] = scheduler.timesteps[5]
UpperCAmelCase_ : Dict = scheduler.timesteps[6]
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def a__ ( self ) -> Any:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ,time_step=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE ,time_step=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.full_loop()
UpperCAmelCase_ : str = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
| 235 | 0 |
'''simple docstring'''
import os
_UpperCAmelCase : int = {"""I""": 1, """V""": 5, """X""": 1_0, """L""": 5_0, """C""": 1_0_0, """D""": 5_0_0, """M""": 1_0_0_0}
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = 0
__lowerCAmelCase = 0
while index < len(lowerCamelCase) - 1:
__lowerCAmelCase = SYMBOLS[numerals[index]]
__lowerCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals(num):
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def __magic_name__( lowerCamelCase = "/p089_roman.txt"):
__lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase) + roman_numerals_filename) as filea:
__lowerCAmelCase = filea.readlines()
for line in lines:
__lowerCAmelCase = line.strip()
__lowerCAmelCase = parse_roman_numerals(lowerCamelCase)
__lowerCAmelCase = generate_roman_numerals(lowerCamelCase)
savings += len(lowerCamelCase) - len(lowerCamelCase)
return savings
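# Worked example (my addition): "XIIII" parses to 14, which regenerates as the
# minimal form "XIV", a saving of two characters.
assert parse_roman_numerals("XIIII") == 14
assert generate_roman_numerals(14) == "XIV"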
if __name__ == "__main__":
print(f"""{solution() = }""")
| 174 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline
__UpperCamelCase : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
__UpperCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
__UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCamelCase : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _snake_case (self ):
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__lowercase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__lowerCAmelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=32 , )
__lowerCAmelCase = CLIPTextModel(__lowercase )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowercase )
__lowerCAmelCase = CLIPTextModelWithProjection(__lowercase )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowercase )
__lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _snake_case (self , __lowercase , __lowercase=0 ):
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCAmelCase = image / 2 + 0.5
if str(__lowercase ).startswith('''mps''' ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
else:
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**__lowercase )
__lowerCAmelCase = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = sd_pipe(**__lowercase ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case (self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _snake_case (self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _snake_case (self ):
pass
def _snake_case (self ):
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**__lowercase )
__lowerCAmelCase = sd_pipe.to(__lowercase )
__lowerCAmelCase = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
# forward without prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 3 * ['''this is a negative prompt''']
__lowerCAmelCase = negative_prompt
__lowerCAmelCase = 3 * [inputs['''prompt''']]
__lowerCAmelCase = sd_pipe(**__lowercase )
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 3 * ['''this is a negative prompt''']
__lowerCAmelCase = 3 * [inputs.pop('''prompt''' )]
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = sd_pipe.encode_prompt(
            __lowercase , negative_prompt=__lowercase )
__lowerCAmelCase = sd_pipe(
**__lowercase , prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , pooled_prompt_embeds=__lowercase , negative_pooled_prompt_embeds=__lowercase , )
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case (self , __lowercase , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=0 ):
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = np.random.RandomState(__lowercase ).standard_normal((1, 4, 64, 64) )
__lowerCAmelCase = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__lowerCAmelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case (self ):
__lowerCAmelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs(__lowercase )
__lowerCAmelCase = pipe(**__lowercase ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 174 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''OwlViTFeatureExtractor''']
__UpperCAmelCase = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 367 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = ShapEPipeline
UpperCAmelCase__ : List[Any] = ["prompt"]
UpperCAmelCase__ : List[str] = ["prompt"]
UpperCAmelCase__ : int = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Union[str, Any] = False
@property
def snake_case_ ( self ) -> Union[str, Any]:
return 32
@property
def snake_case_ ( self ) -> List[str]:
return 32
@property
def snake_case_ ( self ) -> int:
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> Optional[int]:
return 8
@property
def snake_case_ ( self ) -> str:
UpperCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def snake_case_ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase : Dict = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> Dict:
torch.manual_seed(0 )
UpperCamelCase : int = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
UpperCamelCase : Any = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def snake_case_ ( self ) -> Tuple:
torch.manual_seed(0 )
UpperCamelCase : Any = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
UpperCamelCase : Dict = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[Any] = self.dummy_prior
UpperCamelCase : int = self.dummy_text_encoder
UpperCamelCase : Dict = self.dummy_tokenizer
UpperCamelCase : List[str] = self.dummy_renderer
UpperCamelCase : str = HeunDiscreteScheduler(
beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=SCREAMING_SNAKE_CASE_, clip_sample=SCREAMING_SNAKE_CASE_, clip_sample_range=1.0, )
UpperCamelCase : List[str] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Any = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def snake_case_ ( self ) -> int:
UpperCamelCase : str = 'cpu'
UpperCamelCase : Optional[int] = self.get_dummy_components()
UpperCamelCase : int = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[int] = output.images[0]
UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase : List[str] = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> int:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case_ ( self ) -> str:
UpperCamelCase : str = torch_device == 'cpu'
UpperCamelCase : Optional[int] = True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=SCREAMING_SNAKE_CASE_, relax_max_difference=SCREAMING_SNAKE_CASE_, )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.get_dummy_components()
UpperCamelCase : str = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = 1
UpperCamelCase : Union[str, Any] = 2
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase : List[Any] = batch_size * [inputs[key]]
UpperCamelCase : int = pipe(**SCREAMING_SNAKE_CASE_, num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
UpperCamelCase : Optional[int] = ShapEPipeline.from_pretrained('openai/shap-e' )
UpperCamelCase : Optional[int] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
UpperCamelCase : Dict = pipe(
'a shark', generator=SCREAMING_SNAKE_CASE_, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 103 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a__: Optional[int] = '\\n\n'
a__: Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
a__: Optional[int] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def UpperCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ),reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''],)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = 16,__lowerCamelCase = True,__lowerCamelCase=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
A__ = '''cuda'''
else:
A__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
A__ = AutoModelForCausalLM.from_pretrained(__lowerCamelCase )
A__ = model.to(__lowerCamelCase )
A__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(existing_special_tokens ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
A__ = model.config.max_length - 1
else:
A__ = model.config.max_length
A__ = tokenizer(
__lowerCamelCase,add_special_tokens=__lowerCamelCase,padding=__lowerCamelCase,truncation=__lowerCamelCase,max_length=__lowerCamelCase,return_tensors='''pt''',return_attention_mask=__lowerCamelCase,).to(__lowerCamelCase )
A__ = encodings['''input_ids''']
A__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
ppls = []
loss_fct = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0,len(__lowerCamelCase ),__lowerCamelCase ) ):
A__ = min(start_index + batch_size,len(__lowerCamelCase ) )
A__ = encoded_texts[start_index:end_index]
A__ = attn_masks[start_index:end_index]
if add_start_token:
A__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__lowerCamelCase )
A__ = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
A__ = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.int64 ).to(__lowerCamelCase ), attn_mask],dim=1 )
A__ = encoded_batch
with torch.no_grad():
A__ = model(__lowerCamelCase,attention_mask=__lowerCamelCase ).logits
A__ = out_logits[..., :-1, :].contiguous()
A__ = labels[..., 1:].contiguous()
A__ = attn_mask[..., 1:].contiguous()
perplexity_batch = torch.exp(
(loss_fct(shift_logits.transpose(1,2 ),__lowerCamelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__lowerCamelCase )}
| 193 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a__: List[str] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''A painting of a squirrel eating a burger '''
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCamelCase )
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = generator.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase ( self ):
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''',torch_dtype=torch.float16 )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''A painting of a squirrel eating a burger '''
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=50,output_type='''numpy''' ).images
A__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 193 | 1 |
"""simple docstring"""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowercase_ = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
def __UpperCAmelCase ( self ):
self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str )
def __repr__( self ):
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def __UpperCAmelCase ( self ):
return self.major, self.minor, self.patch
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , _a ):
return Version(_a )
elif isinstance(_a , _a ):
return other
raise TypeError(f'''{other} (type {type(_a )}) cannot be compared to version.''' )
def __eq__( self , _a ):
try:
__a = self._validate_operand(_a )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , _a ):
__a = self._validate_operand(_a )
return self.tuple < other.tuple
def __hash__( self ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __UpperCAmelCase ( cls , _a ):
__a = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __UpperCAmelCase ( self ):
return self.version_str
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> Tuple:
__a = _VERSION_REG.match(lowerCAmelCase__ )
if not res:
raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(lowerCAmelCase__ ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def lowercase ( lowerCAmelCase__ : str ) -> Optional[Any]:
return ".".join(str(lowerCAmelCase__ ) for v in version_tuple )
| 369 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['MobileViTFeatureExtractor']
_a = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
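# Hedged illustration of the deferred-import pattern _LazyModule implements
# (the real class lives in transformers.utils; this tiny stand-in is an
# assumption-laden sketch): submodules are only imported on first attribute
# access, keeping the top-level import cheap.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        # the actual import is deferred to this point
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy_demo = TinyLazyModule("demo", {"TYPE_CHECKING": "typing"})
print(lazy_demo.TYPE_CHECKING)  # importlib resolves 'typing' here, on first access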
| 17 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
a__ : Tuple = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def UpperCAmelCase__ (lowerCAmelCase_=None ):
'''simple docstring'''
if subparsers is not None:
__SCREAMING_SNAKE_CASE = subparsers.add_parser("tpu-config" , description=_description )
else:
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
__SCREAMING_SNAKE_CASE = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=lowerCAmelCase_ , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=lowerCAmelCase_ , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
__SCREAMING_SNAKE_CASE = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=lowerCAmelCase_ , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase_ )
return parser
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
__SCREAMING_SNAKE_CASE = defaults.command_file
if not args.command and defaults.commands is not None:
__SCREAMING_SNAKE_CASE = defaults.commands
if not args.tpu_name:
__SCREAMING_SNAKE_CASE = defaults.tpu_name
if not args.tpu_zone:
__SCREAMING_SNAKE_CASE = defaults.tpu_zone
if args.accelerate_version == "dev":
__SCREAMING_SNAKE_CASE = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
__SCREAMING_SNAKE_CASE = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = f"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
__SCREAMING_SNAKE_CASE = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
__SCREAMING_SNAKE_CASE = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [f"""pip install {args.accelerate_version}"""]
new_cmd += args.command
__SCREAMING_SNAKE_CASE = "; ".join(lowerCAmelCase_ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
__SCREAMING_SNAKE_CASE = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"""Running {' '.join(lowerCAmelCase_ )}""" )
return
subprocess.run(lowerCAmelCase_ )
print("Successfully setup pod." )
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tpu_command_parser()
__SCREAMING_SNAKE_CASE = parser.parse_args()
tpu_command_launcher(lowerCAmelCase_ )
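# Hedged sketch of the gcloud invocation the launcher above assembles (the
# obfuscated defs mirror accelerate's tpu_command_parser / tpu_command_launcher;
# the TPU name, zone and commands below are made up). With --debug the script
# would only print a command along these lines instead of running it:
demo_cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu",
    "--zone", "us-central1-a",
    "--command", "cd /usr/share; pip install accelerate -U; echo ok",
    "--worker", "all",
]
print(" ".join(demo_cmd))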
| 54 | 0 |
"""simple docstring"""
from collections import defaultdict
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple ) -> bool:
'''simple docstring'''
lowercase = first_str.lower().strip()
lowercase = second_str.lower().strip()
# Remove whitespace
lowercase = first_str.replace(""" """ , """""" )
lowercase = second_str.replace(""" """ , """""" )
# Strings of different lengths are not anagrams
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
return False
# Default values for count should be 0
lowercase = defaultdict(lowerCamelCase_ )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(lowerCamelCase_ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowerCAmelCase : Optional[int] =input("""Enter the first string """).strip()
__lowerCAmelCase : List[str] =input("""Enter the second string """).strip()
__lowerCAmelCase : Optional[Any] =check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 367 | """simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase : List[Any] =numpy.array([0, 0])
__lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase : List[Any] =numpy.array([1, 0])
__lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] , lowerCAmelCase__ :int ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = initial_vectors
for _ in range(lowerCAmelCase__ ):
lowercase = iteration_step(lowerCAmelCase__ )
return vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :numpy.ndarray , lowerCAmelCase__ :float ) -> numpy.ndarray:
'''simple docstring'''
lowercase = numpy.radians(lowerCAmelCase__ )
lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None:
'''simple docstring'''
lowercase = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase , lowercase = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 32 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def _A ( UpperCamelCase_ : Optional[Any]) -> Union[str, Any]:
'''simple docstring'''
__lowercase = 384
__lowercase = 7
if "tiny" in model_name:
__lowercase = 96
__lowercase = (2, 2, 6, 2)
__lowercase = (3, 6, 12, 24)
elif "small" in model_name:
__lowercase = 96
__lowercase = (2, 2, 18, 2)
__lowercase = (3, 6, 12, 24)
elif "base" in model_name:
__lowercase = 128
__lowercase = (2, 2, 18, 2)
__lowercase = (4, 8, 16, 32)
__lowercase = 12
__lowercase = 512
elif "large" in model_name:
__lowercase = 192
__lowercase = (2, 2, 18, 2)
__lowercase = (6, 12, 24, 48)
__lowercase = 12
__lowercase = 768
# set label information
__lowercase = 150
__lowercase = '''huggingface/label-files'''
__lowercase = '''ade20k-id2label.json'''
__lowercase = json.load(open(hf_hub_download(__a, __a, repo_type="dataset"), "r"))
__lowercase = {int(__a): v for k, v in idalabel.items()}
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = SwinConfig(
embed_dim=__a, depths=__a, num_heads=__a, window_size=__a, out_features=["stage1", "stage2", "stage3", "stage4"], )
__lowercase = UperNetConfig(
backbone_config=__a, auxiliary_in_channels=__a, num_labels=__a, id2label=__a, label2id=__a, )
return config
def _A ( UpperCamelCase_ : List[str]) -> Any:
'''simple docstring'''
__lowercase = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"""))
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight"""))
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight"""))
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias"""))
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight"""))
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias"""))
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
])
# fmt: on
return rename_keys
def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : Optional[int], UpperCamelCase_ : Tuple) -> Tuple:
'''simple docstring'''
__lowercase = dct.pop(__a)
__lowercase = val
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> str:
'''simple docstring'''
__lowercase = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
for i in range(len(backbone_config.depths)):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i]):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""")
__lowercase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""")
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
def _A ( UpperCamelCase_ : Union[str, Any]) -> Optional[Any]:
'''simple docstring'''
__lowercase = x.shape
__lowercase = x.reshape(__a, 4, in_channel // 4)
__lowercase = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(__a, __a)
return x
def _A ( UpperCamelCase_ : Optional[int]) -> Tuple:
'''simple docstring'''
__lowercase = x.shape
__lowercase = x.reshape(__a, in_channel // 4, 4)
__lowercase = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(__a, __a)
return x
def _A ( UpperCamelCase_ : List[str]) -> Tuple:
'''simple docstring'''
__lowercase = x.shape[0]
__lowercase = x.reshape(4, in_channel // 4)
__lowercase = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(__a)
return x
def _A ( UpperCamelCase_ : int) -> Any:
'''simple docstring'''
__lowercase = x.shape[0]
__lowercase = x.reshape(in_channel // 4, 4)
__lowercase = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(__a)
return x
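# Hedged round-trip check of the two "unfold order" weight fixes above
# (upstream names: correct_unfold_reduction_order and its reverse_ pair; the
# obfuscated defs all share one name, so this is re-implemented standalone):
def _demo_correct(x):
    out_c, in_c = x.shape
    return x.reshape(out_c, 4, in_c // 4)[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_c, in_c)

def _demo_reverse(x):
    out_c, in_c = x.shape
    return x.reshape(out_c, in_c // 4, 4)[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_c, in_c)

_demo_x = torch.arange(16.0).reshape(2, 8)
assert torch.equal(_demo_reverse(_demo_correct(_demo_x)), _demo_x)  # exact inverses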
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Dict, UpperCamelCase_ : Union[str, Any]) -> List[str]:
'''simple docstring'''
__lowercase = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
__lowercase = model_name_to_url[model_name]
__lowercase = torch.hub.load_state_dict_from_url(__a, map_location="cpu", file_name=__a)[
'''state_dict'''
]
for name, param in state_dict.items():
print(__a, param.shape)
__lowercase = get_upernet_config(__a)
__lowercase = UperNetForSemanticSegmentation(__a)
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__lowercase = state_dict.pop(__a)
if "bn" in key:
__lowercase = key.replace("bn", "batch_norm")
__lowercase = val
# rename keys
__lowercase = create_rename_keys(__a)
for src, dest in rename_keys:
rename_key(__a, __a, __a)
read_in_q_k_v(__a, config.backbone_config)
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__lowercase = reverse_correct_unfold_reduction_order(__a)
if "norm" in key:
__lowercase = reverse_correct_unfold_norm_order(__a)
model.load_state_dict(__a)
# verify on image
__lowercase = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
__lowercase = Image.open(requests.get(__a, stream=__a).raw).convert("RGB")
__lowercase = SegformerImageProcessor()
__lowercase = processor(__a, return_tensors="pt").pixel_values
with torch.no_grad():
__lowercase = model(__a)
__lowercase = outputs.logits
print(logits.shape)
print("First values of logits:", logits[0, 0, :3, :3])
# assert values
if model_name == "upernet-swin-tiny":
__lowercase = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]])
elif model_name == "upernet-swin-small":
__lowercase = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]])
elif model_name == "upernet-swin-base":
__lowercase = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]])
elif model_name == "upernet-swin-large":
__lowercase = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]])
print("Logits:", outputs.logits[0, 0, :3, :3])
assert torch.allclose(outputs.logits[0, 0, :3, :3], __a, atol=1E-4)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(__a)
print(F"""Saving processor to {pytorch_dump_folder_path}""")
processor.save_pretrained(__a)
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""")
model.push_to_hub(F"""openmmlab/{model_name}""")
processor.push_to_hub(F"""openmmlab/{model_name}""")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_a = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 17 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Dict ) -> Tuple:
"""simple docstring"""
_a : Any = R'''\w+[.]\d+'''
_a : Union[str, Any] = re.findall(__a ,__a )
for pat in pats:
_a : int = key.replace(__a ,'''_'''.join(pat.split('''.''' ) ) )
return key
def __UpperCAmelCase ( __a : List[str] ,__a : Union[str, Any] ,__a : Optional[int] ) -> Tuple:
"""simple docstring"""
_a : Dict = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
_a : Dict = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
_a : Optional[int] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
_a : Union[str, Any] = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
_a : Tuple = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
_a : List[str] = pt_tensor.transpose(2 ,3 ,1 ,0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_a : Dict = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
_a : Union[str, Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_a : Dict = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_a : Union[str, Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCAmelCase ( __a : Dict ,__a : str ,__a : str=42 ) -> Optional[int]:
"""simple docstring"""
_a : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
_a : List[Any] = flax_model.init_weights(PRNGKey(__a ) )
_a : Optional[int] = flatten_dict(__a )
_a : Tuple = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_a : List[str] = rename_key(__a )
_a : Optional[Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
_a , _a : List[str] = rename_key_and_reshape_tensor(__a ,__a ,__a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
_a : Dict = jnp.asarray(__a )
return unflatten_dict(__a )
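# Hedged illustration of the regex rename step above (the three defs share one
# obfuscated name; upstream calls this one rename_key, re-implemented
# standalone so it runs): "module.N" segments become "module_N" so PyTorch
# parameter paths line up with Flax dict nesting.
import re

def demo_rename(key):
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

print(demo_rename("down_blocks.0.attentions.1.proj.weight"))
# -> down_blocks_0.attentions_1.proj.weight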
| 235 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''OwlViTFeatureExtractor''']
lowerCAmelCase__ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 1 |
"""simple docstring"""
import math
def __magic_name__ ( __snake_case : int ) -> List[str]:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __magic_name__ ( __snake_case : float = 0.1 ) -> List[Any]:
lowercase : Optional[Any] = 3
lowercase : List[str] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__UpperCamelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
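# Hedged standalone check of the 6k±1 primality test above (both defs share
# the obfuscated name __magic_name__, so the ratio search shadows the test;
# upstream names are is_prime and solution):
import math

def _is_prime_demo(number):
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 20) if _is_prime_demo(n)] == [2, 3, 5, 7, 11, 13, 17, 19]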
| 202 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[Any] = logging.get_logger(__name__)
def UpperCamelCase( __UpperCamelCase : List[str] ):
lowerCAmelCase_ : Any = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
lowerCAmelCase_ : Any = 1024
lowerCAmelCase_ : Tuple = 4096
lowerCAmelCase_ : List[Any] = 24
lowerCAmelCase_ : int = 16
lowerCAmelCase_ : Tuple = [5, 11, 17, 23]
lowerCAmelCase_ : Optional[Any] = [256, 512, 1024, 1024]
lowerCAmelCase_ : str = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCAmelCase_ : Union[str, Any] = 768
lowerCAmelCase_ : int = [1, 1, 1, 0.5]
lowerCAmelCase_ : List[str] = [256, 512, 768, 768]
lowerCAmelCase_ : int = 150
lowerCAmelCase_ : Any = 16
lowerCAmelCase_ : Any = (1, 384, 384)
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Union[str, Any] = '''project'''
if "ade" in checkpoint_url:
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : Any = 768
lowerCAmelCase_ : Optional[int] = [1, 1, 1, 0.5]
lowerCAmelCase_ : Union[str, Any] = 150
lowerCAmelCase_ : Any = 16
lowerCAmelCase_ : Any = '''huggingface/label-files'''
lowerCAmelCase_ : Any = '''ade20k-id2label.json'''
lowerCAmelCase_ : str = json.load(open(cached_download(hf_hub_url(__UpperCamelCase ,__UpperCamelCase ,repo_type='''dataset''' ) ) ,'''r''' ) )
lowerCAmelCase_ : Optional[Any] = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[Any] = idalabel
lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : List[Any] = [1, 150, 480, 480]
return config, expected_shape
def UpperCamelCase( __UpperCamelCase : Optional[int] ):
lowerCAmelCase_ : int = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def UpperCamelCase( __UpperCamelCase : int ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCAmelCase_ : Dict = name.replace('''pretrained.model''' ,'''dpt.encoder''' )
if "pretrained.model" in name:
lowerCAmelCase_ : List[Any] = name.replace('''pretrained.model''' ,'''dpt.embeddings''' )
if "patch_embed" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''patch_embed''' ,'''''' )
if "pos_embed" in name:
lowerCAmelCase_ : Dict = name.replace('''pos_embed''' ,'''position_embeddings''' )
if "attn.proj" in name:
lowerCAmelCase_ : Any = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "proj" in name and "project" not in name:
lowerCAmelCase_ : Tuple = name.replace('''proj''' ,'''projection''' )
if "blocks" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''blocks''' ,'''layer''' )
if "mlp.fc1" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "norm1" in name and "backbone" not in name:
lowerCAmelCase_ : List[str] = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''norm2''' ,'''layernorm_after''' )
if "scratch.output_conv" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''scratch.output_conv''' ,'''head''' )
if "scratch" in name:
lowerCAmelCase_ : Dict = name.replace('''scratch''' ,'''neck''' )
if "layer1_rn" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''layer1_rn''' ,'''convs.0''' )
if "layer2_rn" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''layer2_rn''' ,'''convs.1''' )
if "layer3_rn" in name:
lowerCAmelCase_ : List[Any] = name.replace('''layer3_rn''' ,'''convs.2''' )
if "layer4_rn" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''layer4_rn''' ,'''convs.3''' )
if "refinenet" in name:
lowerCAmelCase_ : List[str] = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCAmelCase_ : Dict = name.replace(f"""refinenet{layer_idx}""" ,f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
lowerCAmelCase_ : int = name.replace('''out_conv''' ,'''projection''' )
if "resConfUnit1" in name:
lowerCAmelCase_ : Dict = name.replace('''resConfUnit1''' ,'''residual_layer1''' )
if "resConfUnit2" in name:
lowerCAmelCase_ : str = name.replace('''resConfUnit2''' ,'''residual_layer2''' )
if "conv1" in name:
lowerCAmelCase_ : str = name.replace('''conv1''' ,'''convolution1''' )
if "conv2" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''conv2''' ,'''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCAmelCase_ : Dict = name.replace('''pretrained.act_postprocess1.0.project.0''' ,'''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''pretrained.act_postprocess2.0.project.0''' ,'''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCAmelCase_ : Tuple = name.replace('''pretrained.act_postprocess3.0.project.0''' ,'''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCAmelCase_ : Dict = name.replace('''pretrained.act_postprocess4.0.project.0''' ,'''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCAmelCase_ : List[Any] = name.replace('''pretrained.act_postprocess1.3''' ,'''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
lowerCAmelCase_ : Dict = name.replace('''pretrained.act_postprocess1.4''' ,'''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
lowerCAmelCase_ : List[Any] = name.replace('''pretrained.act_postprocess2.3''' ,'''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
lowerCAmelCase_ : List[str] = name.replace('''pretrained.act_postprocess2.4''' ,'''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''pretrained.act_postprocess3.3''' ,'''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
lowerCAmelCase_ : List[str] = name.replace('''pretrained.act_postprocess4.3''' ,'''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''pretrained.act_postprocess4.4''' ,'''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
lowerCAmelCase_ : Tuple = name.replace('''pretrained''' ,'''dpt''' )
if "bn" in name:
lowerCAmelCase_ : Dict = name.replace('''bn''' ,'''batch_norm''' )
if "head" in name:
lowerCAmelCase_ : Any = name.replace('''head''' ,'''head.head''' )
if "encoder.norm" in name:
lowerCAmelCase_ : Tuple = name.replace('''encoder.norm''' ,'''layernorm''' )
if "auxlayer" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''auxlayer''' ,'''auxiliary_head.head''' )
if "backbone" in name:
lowerCAmelCase_ : List[Any] = name.replace('''backbone''' ,'''backbone.bit.encoder''' )
if ".." in name:
lowerCAmelCase_ : List[Any] = name.replace('''..''' ,'''.''' )
if "stem.conv" in name:
lowerCAmelCase_ : str = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
lowerCAmelCase_ : List[str] = name.replace('''blocks''' ,'''layers''' )
if "convolution" in name and "backbone" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''convolution''' ,'''conv''' )
if "layer" in name and "backbone" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''layer''' ,'''layers''' )
if "backbone.bit.encoder.bit" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''backbone.bit.encoder.bit''' ,'''backbone.bit''' )
if "embedder.conv" in name:
lowerCAmelCase_ : str = name.replace('''embedder.conv''' ,'''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
lowerCAmelCase_ : Dict = name.replace('''backbone.bit.encoder.stem.norm''' ,'''backbone.bit.embedder.norm''' )
return name
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ : Dict = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
lowerCAmelCase_ : Tuple = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : str = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase_ : str = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase_ : Dict = in_proj_bias[-config.hidden_size :]
def UpperCamelCase( ):
lowerCAmelCase_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase_ : Dict = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : str ):
lowerCAmelCase_ , lowerCAmelCase_ : Any = get_dpt_config(__UpperCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCAmelCase_ : List[str] = torch.load(__UpperCamelCase ,map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(__UpperCamelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCAmelCase_ : Any = state_dict.pop(__UpperCamelCase )
lowerCAmelCase_ : Optional[Any] = val
# read in qkv matrices
read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase )
# load HuggingFace model
lowerCAmelCase_ : List[Any] = DPTForSemanticSegmentation(__UpperCamelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Check outputs on an image
lowerCAmelCase_ : Tuple = 480 if '''ade''' in checkpoint_url else 384
lowerCAmelCase_ : Optional[int] = DPTImageProcessor(size=__UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = prepare_img()
lowerCAmelCase_ : str = image_processor(__UpperCamelCase ,return_tensors='''pt''' )
# forward pass
lowerCAmelCase_ : Tuple = model(**__UpperCamelCase ).logits if '''ade''' in checkpoint_url else model(**__UpperCamelCase ).predicted_depth
if show_prediction:
lowerCAmelCase_ : Optional[Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) ,size=(image.size[1], image.size[0]) ,mode='''bicubic''' ,align_corners=__UpperCamelCase ,)
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
A__ : Union[str, Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 103 | 0 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
__A : Optional[Any] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
__A : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
__A : Optional[int] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ (self : Tuple):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32")),
"references": datasets.Sequence(datasets.Value("int32")),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def SCREAMING_SNAKE_CASE__ (self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 , __SCREAMING_SNAKE_CASE : Union[str, Any]="binary" , __SCREAMING_SNAKE_CASE : Any=None):
score = f1_score(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , pos_label=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE)
return {"f1": float(score) if score.size == 1 else score}
| 57 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : str = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 57 | 1 |