code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 86 to 54.5k | int64 0 to 371 | stringlengths 87 to 49.2k | int64 0 to 349 | int64 0 to 1
"""simple docstring"""
def _snake_case ( UpperCamelCase : int = 1000000 ):
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Optional[Any] = 1
UpperCAmelCase : Any = {1: 1}
for inputa in range(2 , UpperCamelCase ):
UpperCAmelCase : Any = 0
UpperCAmelCase : Tuple = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
UpperCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
UpperCAmelCase : List[str] = counter
if counter > pre_counter:
UpperCAmelCase : Optional[Any] = inputa
UpperCAmelCase : Tuple = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
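For a quick sanity check, the known Project Euler answer for the default limit can be asserted (the value comes from the published problem, not from this file):

```python
# Longest Collatz chain for starting numbers below one million begins at 837799.
assert solution(1000000) == 837799
```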
| 109 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
A: Optional[Any] = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
A: Optional[int] = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
A: int = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : int = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
UpperCAmelCase : Tuple = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
UpperCAmelCase : Optional[Any] = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE )
return score
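A minimal usage sketch, mirroring the docstring example above (it assumes the legacy `datasets.load_metric` API that this script targets):

```python
import datasets

predictions = [{"prediction_text": ["The seller:"], "id": "qa-pair-0"}]
references = [{"answers": {"answer_start": [143], "text": ["The seller:"]}, "id": "qa-pair-0"}]

cuad_metric = datasets.load_metric("cuad")
results = cuad_metric.compute(predictions=predictions, references=references)
print(results)  # keys: exact_match, f1, aupr, prec_at_80_recall, prec_at_90_recall
```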
| 109 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 225 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (and, if compiled, from the dynamo wrapper)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save data to disk, routing through XLA on TPU and the main process otherwise."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily add each keyword argument to `os.environ` (upper-cased), removing it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
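A minimal usage sketch of `patch_environment` (the variable names are illustrative and assume `MASTER_ADDR` was not already set; note the helper deletes the keys on exit rather than restoring previous values):

```python
import os

with patch_environment(master_addr="localhost", master_port=29500):
    # keys are upper-cased and stringified before being written to os.environ
    assert os.environ["MASTER_ADDR"] == "localhost"
    assert os.environ["MASTER_PORT"] == "29500"
assert "MASTER_ADDR" not in os.environ
```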
def get_pretty_name(obj):
    """Return a human-readable name for an object: its qualified name, its name, or its string form."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge two dictionaries, writing nested keys from `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
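`merge_dicts` folds `source` into `destination` in place and returns it; nested dictionaries are merged key by key rather than overwritten:

```python
destination = {"a": {"b": 1}, "x": 0}
merge_dicts({"a": {"c": 2}}, destination)
assert destination == {"a": {"b": 1, "c": 2}, "x": 0}
```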
def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on `localhost`; defaults to the torch.distributed port 29500."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 225 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
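Assuming this file is the `__init__.py` of `transformers.models.time_series_transformer`, the `_LazyModule` indirection means the heavy torch-backed submodule is only imported when one of its attributes is first accessed:

```python
# Resolved lazily via _LazyModule: the modeling code is imported on first attribute
# access, not when the package itself is imported.
from transformers import TimeSeriesTransformerConfig
```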
| 50 |
"""Compute the sum of Euler's totient function phi(n) for 2 <= n <= limit."""
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes: collect all primes below `limit`
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: multiply in one factor (1 - 1/p) per prime divisor
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
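The inner loop above applies Euler's product formula for the totient:

$$\varphi(n) = n \prod_{p \mid n} \left(1 - \frac{1}{p}\right)$$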
| 67 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Ensure the BPE tokenizer splits, maps and handles unknown tokens as expected."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 19 |
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    # curved surface (2*pi*r^2) plus the flat circular base (pi*r^2)
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    # 4 * pi^2 * R * r, valid only for ring tori (R >= r)
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    # Heron's formula
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
| 19 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_2,
bloom,
bridgetower,
byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextv2,
cpm,
cpmant,
ctrl,
cvt,
data2vec,
deberta,
deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmv2,
layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longt5,
luke,
lxmert,
m2m_100,
marian,
markuplm,
mask2former,
maskformer,
mbart,
mbart50,
mega,
megatron_bert,
megatron_gpt2,
mgp_str,
mluke,
mobilebert,
mobilenet_v1,
mobilenet_v2,
mobilevit,
mobilevitv2,
mpnet,
mra,
mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_2,
speecht5,
splinter,
squeezebert,
swiftformer,
swin,
swin2sr,
swinv2,
switch_transformers,
t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wav2vec2,
wav2vec2_conformer,
wav2vec2_phoneme,
wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 91 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
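A hypothetical wiring of these pieces (paths, batch size, and the tokenizer are placeholders, not taken from this file):

```python
from torch.utils.data import DataLoader

image_transforms = get_image_transforms()
labels = get_mmimdb_labels()
# dataset = JsonlDataset("mmimdb/train.jsonl", tokenizer, image_transforms, labels, max_seq_length=300)
# loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
```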
| 353 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg to obtain an inpainting mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 11 | 0 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to the argparse so it's available for the transformers-cli."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
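A hedged invocation sketch; the flag names come from `register_subcommand` above, while the file paths are placeholders:

```
transformers-cli train \
  --train_data ./train.csv \
  --column_label 0 --column_text 1 --column_id 2 \
  --model bert-base-uncased \
  --output ./trained_model
```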
| 167 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo Linear Multistep scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep update: use as many buffered residuals as are available
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
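The branches in `step` are linear multistep (Adams–Bashforth) updates of increasing order; once four residuals are buffered, the steady-state fourth-order rule from the code is

$$\epsilon' = \tfrac{1}{24}\left(55\,\epsilon_{t} - 59\,\epsilon_{t-1} + 37\,\epsilon_{t-2} - 9\,\epsilon_{t-3}\right)$$

matching Algorithm 2 of the PNDM paper cited in the constructor.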
| 167 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
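In practice this builder is reached through `load_dataset` with pickled `DataFrame` files; a minimal sketch (the file path is a placeholder):

```python
from datasets import load_dataset

ds = load_dataset("pandas", data_files={"train": "data/train.pkl"})
```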
| 108 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    hf_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            hf_state_dict[f"luke.{key}"] = state_dict[key]
        else:
            hf_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(hf_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
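# Input format sketch (inferred from the loader above, not from a spec): one JSON object per line,
# each with an integer "id" and a list of [entity_name, language] pairs, e.g.
#   {"id": 0, "entities": [["[PAD]", null]]}
#   {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which this function maps to {"[PAD]": 0, "en:Japan": 1, "ja:日本": 1}.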
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
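# Example invocation (script name and paths are illustrative; the flags match the parser above):
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base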
| 108 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
def __init__( self ):
'''simple docstring'''
# test for the above condition
self.test()
    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")
    @abstractmethod
    def advance(self):
        """Returns the token(s) that would take this constraint one step closer to being fulfilled."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id):
        """Reads in a token and returns whether it creates progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id):
        """Reads in a token and returns (stepped, completed, reset)."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Resets the progress of this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Returns the number of remaining steps."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Creates a new instance of this constraint; if `stateful`, the progress is copied too."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """`Constraint` enforcing that an ordered sequence of tokens is included in the output."""

    def __init__(self, token_ids):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        """A helper class that builds a trie from the token-id lists in `nested_token_ids`."""
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """`Constraint` that is fulfilled once any one of several token-id sequences appears."""

    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of
            # our list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
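# Minimal usage sketch of the classes above (the token ids are made up; in practice they come from
# a tokenizer): stepping a single phrasal constraint to completion.
#
#   state = ConstraintListState([PhrasalConstraint([101, 102, 103])])
#   for token_id in (101, 102, 103):
#       complete, stepped = state.add(token_id)
#   assert complete and state.completed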
| 330 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
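    # Sketch of the decorator's contract (behavior per Accelerate's documentation): the wrapped
    # function is first run with `starting_batch_size`; on a CUDA out-of-memory error the batch
    # size is halved and the function is retried, e.g.
    #
    #   @find_executable_batch_size(starting_batch_size=128)
    #   def run(batch_size):
    #       ...  # raises OOM for batch sizes that don't fit
    #   run()  # tries 128, 64, 32, ... until the body completes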
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 330 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
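# Usage note (standard transformers lazy-import pattern): at runtime the module is swapped for a
# _LazyModule built from `_import_structure`, so a heavy submodule is only imported the first time
# one of its attributes is accessed, e.g.
#   from transformers.models.longformer import LongformerModel  # triggers the actual import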
| 359 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number: int) -> int:
    """Returns the next element of the chain: the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
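# Worked example: next_number(44) looks up DIGITS_SQUARED[44] = 4**2 + 4**2 = 32, and the chain
# continues 44 -> 32 -> 13 -> 10 -> 1, while e.g. 85 -> 89 lands on the other terminal value.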
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89


def chain(number: int) -> bool:
    """Returns True if `number`'s chain arrives at 1, False if it arrives at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # every multiple of 10 shares the same digit-square sum, so mark those too
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Counts how many starting numbers below `number` produce a chain arriving at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 61 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
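# Usage sketch (illustrative sizes): a small X-MOD config with adapters for two languages.
#   from transformers import XmodConfig, XmodModel
#   config = XmodConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2,
#                       intermediate_size=256, languages=("en_XX", "de_DE"), default_language="en_XX")
#   model = XmodModel(config)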
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
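# Usage sketch (illustrative dims): a config for a toy continuous-control task.
#   from transformers import DecisionTransformerConfig, DecisionTransformerModel
#   config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
#   model = DecisionTransformerModel(config)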
| 290 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_fast_tokenizer(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_= tempfile.mktemp()
with open(__UpperCAmelCase , """wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , __UpperCAmelCase )
UpperCAmelCase_= AlbertTokenizer.from_pretrained(__UpperCAmelCase )
finally:
os.remove(__UpperCAmelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" , """wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , __UpperCAmelCase )
UpperCAmelCase_= AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase_= AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_= os.path.join(__UpperCAmelCase , """vocab.txt""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_= BertTokenizer(__UpperCAmelCase )
tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
UpperCAmelCase_= BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase , repo_id="""test-tokenizer""" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
UpperCAmelCase_= BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_= os.path.join(__UpperCAmelCase , """vocab.txt""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_= BertTokenizer(__UpperCAmelCase )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
UpperCAmelCase_= BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__UpperCAmelCase , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
UpperCAmelCase_= BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_= os.path.join(__UpperCAmelCase , """vocab.txt""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_= CustomTokenizer(__UpperCAmelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
UpperCAmelCase_= AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__UpperCAmelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_= os.path.join(__UpperCAmelCase , """vocab.txt""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_= BertTokenizerFast.from_pretrained(__UpperCAmelCase )
bert_tokenizer.save_pretrained(__UpperCAmelCase )
UpperCAmelCase_= CustomTokenizerFast.from_pretrained(__UpperCAmelCase )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
UpperCAmelCase_= AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__UpperCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
UpperCAmelCase_= AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=__UpperCAmelCase , trust_remote_code=__UpperCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 277 |
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that warns at call time that the decorated API is experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
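# Usage sketch:
#
#   @experimental
#   def new_api():
#       ...
#
#   new_api()  # warns: "'new_api' is experimental and might be subject to breaking changes in the future."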
| 277 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
        expected_dict = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
        self.assertEqual(flatten_dict(input_dict), expected_dict)
def UpperCAmelCase_ ( self : Any ) -> str:
UpperCAmelCase : int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowercase_ ) , x.transpose() ) )
UpperCAmelCase : Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
UpperCAmelCase : str = np.random.randn(3 , 4 )
UpperCAmelCase : int = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ ) , transpose(lowercase_ ).numpy() ) )
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , transpose(lowercase_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
UpperCAmelCase : Dict = np.random.randn(3 , 4 )
UpperCAmelCase : int = tf.constant(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ ) , transpose(lowercase_ ).numpy() ) )
UpperCAmelCase : str = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : str = tf.constant(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , transpose(lowercase_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCAmelCase_ ( self : List[str] ) -> Any:
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 )
UpperCAmelCase : List[Any] = jnp.array(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ ) , np.asarray(transpose(lowercase_ ) ) ) )
UpperCAmelCase : Any = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Tuple = jnp.array(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , np.asarray(transpose(lowercase_ , axes=(1, 2, 0) ) ) ) )
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , np.reshape(lowercase_ , (4, 3) ) ) )
UpperCAmelCase : Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5) ) , np.reshape(lowercase_ , (12, 5) ) ) )
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
UpperCAmelCase : List[str] = np.random.randn(3 , 4 )
UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , reshape(lowercase_ , (4, 3) ).numpy() ) )
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[str] = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5) ) , reshape(lowercase_ , (12, 5) ).numpy() ) )
@require_tf
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase : str = np.random.randn(3 , 4 )
UpperCAmelCase : Union[str, Any] = tf.constant(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , reshape(lowercase_ , (4, 3) ).numpy() ) )
UpperCAmelCase : str = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Tuple = tf.constant(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5) ) , reshape(lowercase_ , (12, 5) ).numpy() ) )
@require_flax
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
UpperCAmelCase : Dict = np.random.randn(3 , 4 )
UpperCAmelCase : str = jnp.array(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , np.asarray(reshape(lowercase_ , (4, 3) ) ) ) )
UpperCAmelCase : List[str] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Optional[Any] = jnp.array(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5) ) , np.asarray(reshape(lowercase_ , (12, 5) ) ) ) )
def UpperCAmelCase_ ( self : Any ) -> int:
UpperCAmelCase : List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , np.squeeze(lowercase_ ) ) )
UpperCAmelCase : Dict = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , np.squeeze(lowercase_ , axis=2 ) ) )
@require_torch
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
UpperCAmelCase : int = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : Optional[Any] = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , squeeze(lowercase_ ).numpy() ) )
UpperCAmelCase : Dict = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : List[Any] = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , squeeze(lowercase_ , axis=2 ).numpy() ) )
@require_tf
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : Optional[Any] = tf.constant(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , squeeze(lowercase_ ).numpy() ) )
UpperCAmelCase : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : Optional[int] = tf.constant(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , squeeze(lowercase_ , axis=2 ).numpy() ) )
@require_flax
def UpperCAmelCase_ ( self : List[str] ) -> int:
UpperCAmelCase : Dict = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : int = jnp.array(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , np.asarray(squeeze(lowercase_ ) ) ) )
UpperCAmelCase : Any = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : int = jnp.array(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , np.asarray(squeeze(lowercase_ , axis=2 ) ) ) )
def UpperCAmelCase_ ( self : int ) -> Tuple:
UpperCAmelCase : int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , np.expand_dims(lowercase_ , axis=1 ) ) )
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
UpperCAmelCase : Dict = np.random.randn(3 , 4 )
UpperCAmelCase : int = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , expand_dims(lowercase_ , axis=1 ).numpy() ) )
@require_tf
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = tf.constant(lowercase_ )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , expand_dims(lowercase_ , axis=1 ).numpy() ) )
@require_flax
def UpperCAmelCase_ ( self : int ) -> Dict:
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : Tuple = jnp.array(lowercase_ )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , np.asarray(expand_dims(lowercase_ , axis=1 ) ) ) )
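    # Sketch of the dispatch these tests rely on: each helper inspects the input's framework and
    # routes accordingly, so (assuming a numpy input) transpose(np.zeros((2, 3))).shape == (3, 2),
    # and the same call works unchanged for torch, tf and jax tensors.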
| 151 |
'''simple docstring'''
def __get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
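# Worked example: in demo graph 0 the only bridges are (2, 3), (3, 4) and (2, 5) — removing any
# one of them disconnects the graph — so
#   sorted(compute_bridges(__get_demo_graph(0))) == [(2, 3), (2, 5), (3, 4)]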
| 151 | 1 |
"""simple docstring"""
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 371 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
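

# Usage sketch (assumes network access to the Hugging Face Hub and the
# vinai/bartpho-syllable checkpoint; the sample sentence is illustrative):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     print(tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"])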
| 341 | 0 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowerCAmelCase__ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = False
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if not self.initialized:
lowerCAmelCase : Union[str, Any] = RagRetriever(
snake_case__ , question_encoder_tokenizer=snake_case__ , generator_tokenizer=snake_case__ , index=snake_case__ , init_retrieval=snake_case__ , )
lowerCAmelCase : Optional[int] = True
def lowercase__ ( self ):
"""simple docstring"""
self.retriever.index.init_index()
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(snake_case__ , snake_case__ )
        return doc_ids, retrieved_doc_embeds
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
if index is not None and index.is_initialized() and len(snake_case__ ) > 0:
raise ValueError(
"When using Ray for distributed fine-tuning, "
"you'll need to provide the paths instead, "
"as the dataset and the index are loaded "
"separately. More info in examples/rag/use_own_knowledge_dataset.py " )
super().__init__(
snake_case__ , question_encoder_tokenizer=snake_case__ , generator_tokenizer=snake_case__ , index=snake_case__ , init_retrieval=snake_case__ , )
lowerCAmelCase : Any = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for worker in self.retrieval_workers
] )
def lowercase__ ( self ):
"""simple docstring"""
logger.info("initializing retrieval" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
lowerCAmelCase : Optional[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(snake_case__ , snake_case__ ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(snake_case__ , snake_case__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(snake_case__ )
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__=None , **snake_case__ ):
"""simple docstring"""
return super(snake_case__ , cls ).get_tokenizers(snake_case__ , snake_case__ , **snake_case__ )
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = kwargs.pop("config" , snake_case__ ) or RagConfig.from_pretrained(snake_case__ , **snake_case__ )
lowerCAmelCase : List[Any] = RagTokenizer.from_pretrained(snake_case__ , config=snake_case__ )
lowerCAmelCase : Tuple = rag_tokenizer.question_encoder
lowerCAmelCase : Any = rag_tokenizer.generator
if indexed_dataset is not None:
lowerCAmelCase : List[str] = "custom"
lowerCAmelCase : Optional[int] = CustomHFIndex(config.retrieval_vector_size , snake_case__ )
else:
lowerCAmelCase : Optional[Any] = cls._build_index(snake_case__ )
return cls(
snake_case__ , question_encoder_tokenizer=snake_case__ , generator_tokenizer=snake_case__ , retrieval_workers=snake_case__ , index=snake_case__ , )
| 108 |
"""simple docstring"""
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    # Recursively move `height` disks from from_pole to to_pole via with_pole.
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    print("moving disk from", from_pole, "to", to_pole)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
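

# Sanity-check sketch: move_tower performs 2**height - 1 moves; count_moves
# mirrors its recursion but counts move_disk calls instead of printing.
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1


assert count_moves(3) == 7  # 2**3 - 1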
if __name__ == "__main__":
main()
| 108 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowercase (a__ ):
"""simple docstring"""
_UpperCAmelCase = ["vqvae"]
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , vqvae=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 5_0 if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ) else 1_0_0_0
@torch.no_grad()
def __call__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__=True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = steps or self.get_default_steps()
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE_ : List[Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=SCREAMING_SNAKE_CASE_ , device=self.device , )
SCREAMING_SNAKE_CASE_ : Dict = noise
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.mel.audio_slice_to_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Any = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE_ : Any = (input_image / 2_5_5) * 2 - 1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE_ : List[str] = self.vqvae.encode(torch.unsqueeze(SCREAMING_SNAKE_CASE_ , 0 ) ).latent_dist.sample(
generator=SCREAMING_SNAKE_CASE_ )[0]
SCREAMING_SNAKE_CASE_ : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE_ : int = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE_ : Any = int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )['sample']
else:
SCREAMING_SNAKE_CASE_ : int = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )['sample']
if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : Dict = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , sample=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , )['prev_sample']
else:
SCREAMING_SNAKE_CASE_ : str = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , sample=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE_ : str = mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE_ : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE_ : str = 1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.vqvae.decode(SCREAMING_SNAKE_CASE_ )['sample']
SCREAMING_SNAKE_CASE_ : int = (images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ : Any = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE_ : Dict = (images * 2_5_5).round().astype('uint8' )
        SCREAMING_SNAKE_CASE_ : List[str] = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB' ).convert('L' ) for _ in images) )
        SCREAMING_SNAKE_CASE_ : Any = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(SCREAMING_SNAKE_CASE_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(SCREAMING_SNAKE_CASE_ ) )
@torch.no_grad()
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 5_0 ):
"""simple docstring"""
assert isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : List[Any] = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE_ : str = (sample / 2_5_5) * 2 - 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Tensor(SCREAMING_SNAKE_CASE_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE_ : str = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE_ : Any = self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE_ : str = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE_ : str = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE_ : int = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )['sample']
SCREAMING_SNAKE_CASE_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE_ : List[str] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE_ : int = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = acos(torch.dot(torch.flatten(SCREAMING_SNAKE_CASE_ ) , torch.flatten(SCREAMING_SNAKE_CASE_ ) ) / torch.norm(SCREAMING_SNAKE_CASE_ ) / torch.norm(SCREAMING_SNAKE_CASE_ ) )
return sin((1 - alpha) * theta ) * xa / sin(SCREAMING_SNAKE_CASE_ ) + sin(alpha * theta ) * xa / sin(SCREAMING_SNAKE_CASE_ )
| 365 |
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
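

# For context, a minimal sketch of the matching sender side (the filename and
# the shared host/port convention are assumptions, not part of the original
# script): accept one client, print its greeting, then stream a file to it in
# 1 KiB chunks.
def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024).decode())  # greeting sent by the client
    with open(filename, "rb") as in_file:
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            conn.send(chunk)
    conn.close()
    server.close()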
if __name__ == "__main__":
main()
| 162 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
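
    # Usage sketch (the matrix values are illustrative, chosen to be strictly
    # diagonally dominant so the iteration converges):
    #
    #     coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    #     constant = np.array([[2.0], [-6.0], [-4.0]])
    #     print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0, 0.0], 50))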
| 118 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( SCREAMING_SNAKE_CASE__, unittest.TestCase ):
_lowerCamelCase = CLIPTokenizer
_lowerCamelCase = CLIPTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = {}
_lowerCamelCase = False
def lowercase ( self : Tuple ) -> int:
super().setUp()
# fmt: off
lowercase : Dict = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase : List[Any] = dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
lowercase : List[str] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
lowercase : Union[str, Any] = {'unk_token': '<unk>'}
lowercase : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
lowercase : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase ) )
def lowercase ( self : Dict, **lowerCAmelCase : Optional[Any] ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase ( self : Optional[Any], **lowerCAmelCase : Tuple ) -> str:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase ( self : Optional[Any], lowerCAmelCase : List[Any] ) -> Optional[Any]:
lowercase : int = 'lower newer'
lowercase : str = 'lower newer'
return input_text, output_text
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowercase : str = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
lowercase : Union[str, Any] = 'lower newer'
lowercase : List[str] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
lowercase : List[str] = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
lowercase : int = tokens + [tokenizer.unk_token]
lowercase : Optional[int] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), lowerCAmelCase )
@require_ftfy
def lowercase ( self : Tuple ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : List[str] = self.tokenizer_class.from_pretrained(lowerCAmelCase, **lowerCAmelCase )
lowercase : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase, **lowerCAmelCase )
lowercase : Optional[int] = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
lowercase : int = tokenizer_s.tokenize(lowerCAmelCase )
lowercase : Any = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowercase : Optional[int] = 'xa\u0303y' + ' ' + 'x\xe3y'
lowercase : int = tokenizer_s.tokenize(lowerCAmelCase )
lowercase : Optional[Any] = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
# Test that the tokenization is identical on unicode of space type
lowercase : Any = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowercase : Optional[Any] = tokenizer_s.tokenize(lowerCAmelCase )
lowercase : Union[str, Any] = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
# Test that the tokenization is identical on unicode of line break type
lowercase : Optional[Any] = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowercase : str = tokenizer_s.tokenize(lowerCAmelCase )
lowercase : str = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Any ) -> List[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase : Union[str, Any] = f'''{text_of_1_token} {text_of_1_token}'''
lowercase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase, use_fast=lowerCAmelCase, )
lowercase : Dict = tokenizer_r(lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0], (0, len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1], (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )), )
lowercase : Tuple = f''' {text}'''
lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase, use_fast=lowerCAmelCase, )
lowercase : Dict = tokenizer_r(lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(lowerCAmelCase ) + 1, 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )), )
def lowercase ( self : Dict ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def lowercase ( self : List[Any] ) -> str:
super().test_tokenization_python_rust_equals()
def lowercase ( self : Dict ) -> Tuple:
# CLIP always lower cases letters
pass
| 255 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = "Usage of script: script_name <size_of_canvas:int>"
lowercase_ = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def __lowerCAmelCase ( __UpperCAmelCase : int ):
'''simple docstring'''
__snake_case : List[str] = [[False for i in range(__SCREAMING_SNAKE_CASE )] for j in range(__SCREAMING_SNAKE_CASE )]
return canvas
def __lowerCAmelCase ( __UpperCAmelCase : list[list[bool]] ):
'''simple docstring'''
for i, row in enumerate(__SCREAMING_SNAKE_CASE ):
for j, _ in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : int = bool(random.getrandbits(1 ) )
def __lowerCAmelCase ( __UpperCAmelCase : list[list[bool]] ):
'''simple docstring'''
__snake_case : Union[str, Any] = np.array(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__SCREAMING_SNAKE_CASE ):
for c, pt in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : Optional[Any] = __judge_point(
__SCREAMING_SNAKE_CASE , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__snake_case : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__snake_case : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __lowerCAmelCase ( __UpperCAmelCase : bool , __UpperCAmelCase : list[list[bool]] ):
'''simple docstring'''
__snake_case : Any = 0
__snake_case : Dict = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
__snake_case : str = pt
if pt:
if alive < 2:
__snake_case : Optional[Any] = False
elif alive == 2 or alive == 3:
__snake_case : Union[str, Any] = True
elif alive > 3:
__snake_case : Optional[int] = False
else:
if alive == 3:
__snake_case : List[Any] = True
return state
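

# Headless usage sketch (no matplotlib window; the board size and generation
# count are illustrative): advance a small random board a few generations.
def _headless_demo(size: int = 5, generations: int = 3) -> None:
    board = create_canvas(size)
    seed(board)
    for _ in range(generations):
        board = run(board)
    print(sum(cell for row in board for cell in row), "cells alive")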
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["w", "k"])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 367 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = "encodec"
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _lowerCAmelCase : Tuple=2_40_00 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : int=1_28 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Union[str, Any]=[8, 5, 4, 2] , _lowerCAmelCase : str="weight_norm" , _lowerCAmelCase : Tuple=7 , _lowerCAmelCase : str=7 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : int=2 , _lowerCAmelCase : str=True , _lowerCAmelCase : Dict="reflect" , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : int=1.0 , _lowerCAmelCase : Optional[int]=10_24 , _lowerCAmelCase : int=None , _lowerCAmelCase : List[str]=True , **_lowerCAmelCase : List[Any] , ):
__snake_case : Optional[int] = target_bandwidths
__snake_case : int = sampling_rate
__snake_case : List[Any] = audio_channels
__snake_case : str = normalize
__snake_case : Union[str, Any] = chunk_length_s
__snake_case : Union[str, Any] = overlap
__snake_case : Union[str, Any] = hidden_size
__snake_case : Union[str, Any] = num_filters
__snake_case : Optional[Any] = num_residual_layers
__snake_case : List[Any] = upsampling_ratios
__snake_case : List[str] = norm_type
__snake_case : Union[str, Any] = kernel_size
__snake_case : Optional[int] = last_kernel_size
__snake_case : Optional[Any] = residual_kernel_size
__snake_case : Dict = dilation_growth_rate
__snake_case : int = use_causal_conv
__snake_case : Tuple = pad_mode
__snake_case : str = compress
__snake_case : Optional[Any] = num_lstm_layers
__snake_case : List[Any] = trim_right_ratio
__snake_case : Any = codebook_size
__snake_case : int = codebook_dim if codebook_dim is not None else hidden_size
__snake_case : int = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
super().__init__(**_lowerCAmelCase )
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
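
# Usage sketch against the upstream transformers EncodecConfig, which this
# class mirrors (only default values are used, so the numbers below follow
# from the defaults above):
#
#     config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#     print(config.chunk_length)  # int(1.0 * 24000) -> 24000
#     print(config.chunk_stride)  # max(1, int(0.99 * 24000)) -> 23760
#     print(config.frame_rate)    # ceil(24000 / (8 * 5 * 4 * 2)) -> 75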
| 20 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase: List[Any] = logging.get_logger(__name__)
lowerCAmelCase: str = Dict[str, Any]
lowerCAmelCase: Optional[Any] = List[Prediction]
@add_end_docstrings(lowerCAmelCase_ )
class a__( lowerCAmelCase_ ):
def __init__( self : Optional[Any] , *__snake_case : str , **__snake_case : str ):
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def lowercase_ ( self : Union[str, Any] , **__snake_case : int ):
a : int = {}
if "threshold" in kwargs:
a : Any = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self : List[str] , *__snake_case : List[str] , **__snake_case : List[Any] ):
return super().__call__(*lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self : Optional[Any] , __snake_case : List[Any] ):
a : List[Any] = load_image(lowerCamelCase__ )
a : int = torch.IntTensor([[image.height, image.width]] )
a : Dict = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
a : Optional[Any] = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
a : Optional[int] = target_size
return inputs
def lowercase_ ( self : Tuple , __snake_case : str ):
a : Any = model_inputs.pop('target_size' )
a : List[str] = self.model(**lowerCamelCase__ )
a : Tuple = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
a : Tuple = model_inputs['bbox']
return model_outputs
def lowercase_ ( self : List[Any] , __snake_case : Tuple , __snake_case : int=0.9 ):
a : Dict = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
def unnormalize(__snake_case : int ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
            scores , classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
a : List[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
a : Dict = [unnormalize(lowerCamelCase__ ) for bbox in model_outputs['bbox'].squeeze(0 )]
a : Union[str, Any] = ['score', 'label', 'box']
a : List[Any] = [dict(zip(lowerCamelCase__ , lowerCamelCase__ ) ) for vals in zip(scores.tolist() , lowerCamelCase__ , lowerCamelCase__ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
a : Optional[Any] = self.image_processor.post_process_object_detection(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
a : List[str] = raw_annotations[0]
a : List[Any] = raw_annotation['scores']
a : Dict = raw_annotation['labels']
a : Any = raw_annotation['boxes']
a : Tuple = scores.tolist()
a : Optional[int] = [self.model.config.idalabel[label.item()] for label in labels]
a : str = [self._get_bounding_box(lowerCamelCase__ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
a : Any = ['score', 'label', 'box']
a : Tuple = [
dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def lowercase_ ( self : Optional[Any] , __snake_case : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
a : Any = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
        return bbox
| 297 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git_vision_model"
def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git"
def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = tie_word_embeddings
SCREAMING_SNAKE_CASE = num_image_with_embedding
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
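

# Usage sketch against the upstream transformers classes these two mirror
# (GitVisionConfig and GitConfig; the image size is illustrative):
#
#     from transformers import GitConfig, GitVisionConfig
#     vision_config = GitVisionConfig(image_size=384)
#     config = GitConfig(vision_config=vision_config.to_dict())
#     print(config.to_dict()["vision_config"]["image_size"])  # 384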
| 296 | 0 |
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
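    # For this implementation, the call above prints ~0.4667: only the single
    # "l" matches within the Jaro search window, so
    # jaro = (1/5 + 1/5 + 1/1) / 3 = 0.4666..., with no common-prefix bonus.
    print(jaro_winkler("hello", "hello"))  # identical strings score 1.0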
| 144 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 144 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
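
# Programmatic usage sketch (equivalent to the CLI above; the paths are
# hypothetical placeholders, and an empty tf_checkpoint_path skips the
# TensorFlow-weight conversion branch):
#
#     convert_transfo_xl_checkpoint_to_pytorch(
#         tf_checkpoint_path="",
#         transfo_xl_config_file="",
#         pytorch_dump_folder_path="./transfo-xl-dump",
#         transfo_xl_dataset_file="./corpus-data.pkl",
#     )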
| 334 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.txt"}
_lowerCamelCase ={
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase ={
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ['input_ids', 'attention_mask']
def __init__( self : int ,snake_case : Dict ,snake_case : Dict="<unk>" ,snake_case : Optional[int]="<cls>" ,snake_case : Optional[int]="<pad>" ,snake_case : int="<mask>" ,snake_case : Optional[int]="<eos>" ,**snake_case : List[str] ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =load_vocab_file(snake_case )
SCREAMING_SNAKE_CASE =dict(enumerate(self.all_tokens ) )
SCREAMING_SNAKE_CASE ={tok: ind for ind, tok in enumerate(self.all_tokens )}
SCREAMING_SNAKE_CASE =unk_token
SCREAMING_SNAKE_CASE =cls_token
SCREAMING_SNAKE_CASE =pad_token
SCREAMING_SNAKE_CASE =mask_token
SCREAMING_SNAKE_CASE =eos_token
SCREAMING_SNAKE_CASE =self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : int ):
return self._id_to_token.get(snake_case ,self.unk_token )
def _lowerCAmelCase ( self : Dict ,snake_case : str ):
return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) )
def _lowerCAmelCase ( self : Tuple ,snake_case : List[str] ,**snake_case : Any ):
return text.split()
def _lowerCAmelCase ( self : Optional[int] ,snake_case : str=False ):
return len(self._id_to_token )
def _lowerCAmelCase ( self : List[str] ):
return {token: i for i, token in enumerate(self.all_tokens )}
def _lowerCAmelCase ( self : List[Any] ,snake_case : str ):
return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) )
def _lowerCAmelCase ( self : Any ,snake_case : int ):
return self._id_to_token.get(snake_case ,self.unk_token )
def _lowerCAmelCase ( self : List[str] ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE =[self.cls_token_id]
SCREAMING_SNAKE_CASE =[self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def _lowerCAmelCase ( self : Optional[int] ,snake_case : List ,snake_case : Optional[List] = None ,snake_case : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
SCREAMING_SNAKE_CASE =[1] + ([0] * len(snake_case )) + [1]
if token_ids_a is not None:
mask += [0] * len(snake_case ) + [1]
return mask
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Dict ,snake_case : Any ):
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,(filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
with open(snake_case ,'w' ) as f:
f.write('\n'.join(self.all_tokens ) )
return (vocab_file,)
@property
def _lowerCAmelCase ( self : int ):
return self.get_vocab_size(with_added_tokens=snake_case )
def _lowerCAmelCase ( self : str ,snake_case : Union[List[str], List[AddedToken]] ,snake_case : bool = False ):
return super()._add_tokens(snake_case ,special_tokens=snake_case )
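

# Usage sketch (upstream this tokenizer is exposed as EsmTokenizer; assumes
# Hub access to the facebook/esm2_t6_8M_UR50D checkpoint, and the protein
# sequence is illustrative):
#
#     from transformers import EsmTokenizer
#     tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     print(tokenizer("MKTAYIAKQR")["input_ids"])  # <cls> ... <eos> wrapped ids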
| 334 | 1 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    # numerically stable softmax over the last axis
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 357 | """simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__A = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def _snake_case ( _UpperCAmelCase ):
lowercase__: int = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_UpperCAmelCase )
def __init__(self, tfds_path, datasets_directory, *args):
    self._logger = get_logger("datasets-cli/converting")

    self._tfds_path = tfds_path
    self._datasets_directory = datasets_directory

def run(self):
    if os.path.isdir(self._tfds_path):
        abs_tfds_path = os.path.abspath(self._tfds_path)
    elif os.path.isfile(self._tfds_path):
        abs_tfds_path = os.path.dirname(self._tfds_path)
    else:
        raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

    abs_datasets_path = os.path.abspath(self._datasets_directory)

    self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

    utils_files = []
    with_manual_update = []
    imports_to_builder_map = {}

    if os.path.isdir(self._tfds_path):
        file_names = os.listdir(abs_tfds_path)
    else:
        file_names = [os.path.basename(self._tfds_path)]

    for f_name in file_names:
        self._logger.info(f"Looking at file {f_name}")

        input_file = os.path.join(abs_tfds_path, f_name)
        output_file = os.path.join(abs_datasets_path, f_name)

        if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
            self._logger.info("Skipping file")
            continue

        with open(input_file, encoding="utf-8") as f:
            lines = f.readlines()

        out_lines = []
        is_builder = False
        needs_manual_update = False
        tfds_imports = []
        for line in lines:
            out_line = line

            # Convert imports
            if "import tensorflow.compat.v2 as tf" in out_line:
                continue
            elif "@tfds.core" in out_line:
                continue
            elif "builder=self" in out_line:
                continue
            elif "import tensorflow_datasets.public_api as tfds" in out_line:
                out_line = "import datasets\n"
            elif "import tensorflow" in out_line:
                # order is important here
                out_line = ""
                continue
            elif "from absl import logging" in out_line:
                out_line = "from datasets import logging\n"
            elif "getLogger" in out_line:
                out_line = out_line.replace("getLogger", "get_logger")
            elif any(expression in out_line for expression in TO_HIGHLIGHT):
                needs_manual_update = True
                to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                out_lines.append(out_line)
                out_lines.append(HIGHLIGHT_MESSAGE_POST)
                continue
            else:
                for pattern, replacement in TO_CONVERT:
                    out_line = re.sub(pattern, replacement, out_line)

            # Take care of saving utilities (to later move them together with main script)
            if "tensorflow_datasets" in out_line:
                match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                out_line = "from . import " + match.group(1)

            # Check we haven't forgotten anything
            if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                raise ValueError(f"Error converting {out_line.strip()}")

            if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                is_builder = True
            out_lines.append(out_line)

        if is_builder or "wmt" in f_name:
            # We create a new directory for each dataset
            dir_name = f_name.replace(".py", "")
            output_dir = os.path.join(abs_datasets_path, dir_name)
            output_file = os.path.join(output_dir, f_name)
            os.makedirs(output_dir, exist_ok=True)
            self._logger.info(f"Adding directory {output_dir}")
            imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
        else:
            # Utilities will be moved at the end
            utils_files.append(output_file)

        if needs_manual_update:
            with_manual_update.append(output_file)

        with open(output_file, "w", encoding="utf-8") as f:
            f.writelines(out_lines)
        self._logger.info(f"Converted in {output_file}")

    for utils_file in utils_files:
        try:
            f_name = os.path.basename(utils_file)
            dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
            self._logger.info(f"Moving {dest_folder} to {utils_file}")
            shutil.copy(utils_file, dest_folder)
        except KeyError:
            self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

    if with_manual_update:
        for file_path in with_manual_update:
            self._logger.warning(
                f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
            )
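# Illustrative sketch (added): the converter above rewrites lines through a
# (pattern, replacement) table named TO_CONVERT that is defined elsewhere in
# the module and not shown here. The two rules below are assumptions used only
# to demonstrate the mechanism, not the real table contents.
import re

_EXAMPLE_TO_CONVERT = [
    (r"tfds\.core\.BuilderConfig", "datasets.BuilderConfig"),
    (r"tfds\.features\.Text\(\)", "datasets.Value('string')"),
]


def _convert_line(line: str) -> str:
    # apply each regex rule in order, exactly as the run() loop does
    for pattern, replacement in _EXAMPLE_TO_CONVERT:
        line = re.sub(pattern, replacement, line)
    return line


assert _convert_line("tfds.features.Text()") == "datasets.Value('string')"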
| 2 | 0 |
def longest_distance(graph: dict) -> None:
    """Print the number of vertices on the longest path in a DAG, using Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
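# A small variant (added for illustration): same Kahn-style relaxation in
# O(V + E), but returning the longest-path length instead of printing it.
def longest_distance_value(dag: dict) -> int:
    indegree = [0] * len(dag)
    for targets in dag.values():
        for node in targets:
            indegree[node] += 1
    long_dist = [1] * len(dag)
    queue = [node for node in dag if indegree[node] == 0]
    while queue:
        vertex = queue.pop(0)
        for node in dag[vertex]:
            indegree[node] -= 1
            long_dist[node] = max(long_dist[node], long_dist[vertex] + 1)
            if indegree[node] == 0:
                queue.append(node)
    return max(long_dist)


# the longest chain in the sample DAG is e.g. 0 -> 2 -> 5 -> 6 -> 7 (5 vertices)
assert longest_distance_value({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5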
| 343 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
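# Illustrative single-process sketch (added) of what pad_across_processes is
# expected to do in the test above: each rank holds a tensor with a different
# first dimension, and every tensor is zero-padded up to the global maximum.
# Simulated here on CPU without launching torchrun; pad_to is a local helper,
# not an accelerate API.
import torch


def pad_to(tensor, target_dim0, pad_first=False):
    padding = torch.zeros(target_dim0 - tensor.shape[0], *tensor.shape[1:], dtype=tensor.dtype)
    return torch.cat([padding, tensor] if pad_first else [tensor, padding], dim=0)


ranks = [torch.randint(0, 10, (idx + 2, 10)) for idx in range(2)]  # shapes (2, 10) and (3, 10)
max_dim0 = max(t.shape[0] for t in ranks)
padded = [pad_to(t, max_dim0) for t in ranks]
assert all(t.shape[0] == max_dim0 for t in padded)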
| 224 | 0 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
A : int = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
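# Hypothetical follow-up (added): load the pipeline the script just saved and
# sample from it. LDMPipeline.__call__ returns an ImagePipelineOutput whose
# .images field holds the generated PIL images; the path argument is a
# placeholder supplied by the caller.
def sample_from_converted(output_path):
    from diffusers import LDMPipeline

    pipeline = LDMPipeline.from_pretrained(output_path)
    return pipeline(batch_size=1, num_inference_steps=50).images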
| 351 | from math import pi, sqrt, tan
def surface_area_cube(side_length):
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length, breadth, height):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius):
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius):
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1, radius_2, height):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius, tube_radius):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length, width):
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length):
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1, side_2, side_3):
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area


def area_parallelogram(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1, base_2, height):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius):
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x, radius_y):
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1, diagonal_2):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides, length):
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
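# A couple of sanity checks (added): Heron's formula on a 3-4-5 right triangle
# must agree with the base * height / 2 formula, and the regular-polygon
# formula with n = 4 must reduce to the square formula (up to float rounding).
assert area_triangle_three_sides(3, 4, 5) == area_triangle(3, 4) == 6.0
assert abs(area_reg_polygon(4, 10) - area_square(10)) < 1e-9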
| 305 | 0 |
"""simple docstring"""
def is_even(number: int) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
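# Why the bit trick works (added): the least significant bit of a two's
# complement integer is 0 exactly for even numbers, negatives included, so the
# bitwise test matches the modulo test everywhere.
assert all(is_even(n) == (n % 2 == 0) for n in range(-10, 11))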
| 45 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8_192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
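# Quick sanity check (added): instantiate the config defined above (inside the
# transformers source tree, because of the relative imports) and read a couple
# of the segmentation-head fields; the values are just the defaults plus one
# override.
config = BeitConfig(image_size=384)
assert config.image_size == 384
assert config.out_indices == [3, 5, 7, 11]  # hidden states fed to the decode head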
| 45 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) | 352 |
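# Quick sanity check (added) for the config above: attribute_map aliases the
# GPT-2 style names onto this config, so num_attention_heads resolves to n_head.
cfg = DecisionTransformerConfig(state_dim=17, act_dim=4, n_head=2)
assert cfg.num_attention_heads == cfg.n_head == 2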
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4)) | 306 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 40 |
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle a list in place by performing len(data) random pair swaps, then return it."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
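# Note (added): the routine above performs random pair swaps, which is not the
# classic Fisher-Yates procedure and does not produce a uniform distribution
# over permutations. The textbook in-place shuffle, for comparison:
def fisher_yates_shuffle_textbook(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick from the not-yet-fixed prefix
        data[i], data[j] = data[j], data[i]
    return data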
| 307 | 0 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __UpperCamelCase :
def __init__( self , __a , __a , __a , __a , __a , __a=0.2 , __a=0.2 ):
'''simple docstring'''
__a : List[str] = bp_numa
__a : Union[str, Any] = bp_numa
__a : Union[str, Any] = bp_numa
__a : Optional[int] = conva_get[:2]
__a : List[str] = conva_get[2]
__a : str = size_pa
__a : Optional[Any] = rate_w
__a : Any = rate_t
__a : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__a : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a : List[str] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a : Dict = -2 * np.random.rand(self.conva[1] ) + 1
__a : int = -2 * np.random.rand(self.num_bpa ) + 1
__a : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[str] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__a , 'wb' ) as f:
pickle.dump(__a , __a )
print(f"""Model saved: {save_path}""" )
@classmethod
def __UpperCAmelCase ( cls , __a ):
'''simple docstring'''
with open(__a , 'rb' ) as f:
__a : List[Any] = pickle.load(__a ) # noqa: S301
__a : Union[str, Any] = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
__a : List[Any] = model_dic.get('size_pooling1' )
__a : List[str] = model_dic.get('num_bp1' )
__a : List[Any] = model_dic.get('num_bp2' )
__a : Optional[int] = model_dic.get('num_bp3' )
__a : Tuple = model_dic.get('rate_weight' )
__a : Optional[int] = model_dic.get('rate_thre' )
# create model instance
__a : str = CNN(__a , __a , __a , __a , __a , __a , __a )
# modify model parameter
__a : List[Any] = model_dic.get('w_conv1' )
__a : int = model_dic.get('wkj' )
__a : List[Any] = model_dic.get('vji' )
__a : int = model_dic.get('thre_conv1' )
__a : Optional[int] = model_dic.get('thre_bp2' )
__a : Dict = model_dic.get('thre_bp3' )
return conv_ins
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return round(__a , 3 )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = convs[0]
__a : Dict = convs[1]
__a : Tuple = np.shape(__a )[0]
# get the data slice of original image data, data_focus
__a : Optional[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , __a ):
for j_focus in range(0 , size_data - size_conv + 1 , __a ):
__a : Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__a )
# calculate the feature map of every single kernel, and saved as list of matrix
__a : Dict = []
__a : str = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__a ):
__a : int = []
for i_focus in range(len(__a ) ):
__a : Optional[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__a ) )
__a : Tuple = np.asmatrix(__a ).reshape(
__a , __a )
data_featuremap.append(__a )
        # expanding the data slice to one dimension
__a : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__a ) )
__a : Tuple = np.asarray(__a )
return focus_list, data_featuremap
def __UpperCAmelCase ( self , __a , __a , __a="average_pool" ):
'''simple docstring'''
__a : List[Any] = len(featuremaps[0] )
__a : Dict = int(size_map / size_pooling )
__a : Optional[int] = []
for i_map in range(len(__a ) ):
__a : List[str] = featuremaps[i_map]
__a : Dict = []
for i_focus in range(0 , __a , __a ):
for j_focus in range(0 , __a , __a ):
__a : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__a ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__a ) )
__a : Dict = np.asmatrix(__a ).reshape(__a , __a )
featuremap_pooled.append(__a )
return featuremap_pooled
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[Any] = []
for i in range(len(__a ) ):
__a : str = np.shape(data[i] )
__a : Optional[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
__a : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(__a )
__a : Any = np.asarray(__a )
return data_expanded
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Union[str, Any] = np.asarray(__a )
__a : str = np.shape(__a )
__a : Dict = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = []
__a : int = 0
for i_map in range(__a ):
__a : Tuple = np.ones((size_map, size_map) )
for i in range(0 , __a , __a ):
for j in range(0 , __a , __a ):
__a : List[Any] = pd_pool[
i_pool
]
__a : Union[str, Any] = i_pool + 1
__a : str = np.multiply(
__a , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__a )
return pd_all
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a=bool ):
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__a )) )
print((' - - Shape: Teach_Data ', np.shape(__a )) )
__a : Union[str, Any] = 0
__a : Optional[int] = []
__a : List[str] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
__a : Optional[int] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(__a ) ):
# print('------------Learning Image: %d--------------'%p)
__a : Any = np.asmatrix(datas_train[p] )
__a : str = np.asarray(datas_teach[p] )
__a , __a : Optional[Any] = self.convolute(
__a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a : Optional[int] = self.pooling(__a , self.size_poolinga )
__a : List[Any] = np.shape(__a )
__a : str = self._expand(__a )
__a : Optional[int] = data_bp_input
__a : Optional[Any] = np.dot(__a , self.vji.T ) - self.thre_bpa
__a : str = self.sig(__a )
__a : Any = np.dot(__a , self.wkj.T ) - self.thre_bpa
__a : Dict = self.sig(__a )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
__a : str = np.multiply(
(data_teach - bp_outa) , np.multiply(__a , (1 - bp_outa) ) )
__a : Optional[int] = np.multiply(
np.dot(__a , self.wkj ) , np.multiply(__a , (1 - bp_outa) ) )
__a : Tuple = np.dot(__a , self.vji )
__a : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
__a : Optional[Any] = pd_conva_pooled.T.getA().tolist()
__a : Any = self._calculate_gradient_from_pool(
__a , __a , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__a : List[str] = self._expand_mat(pd_conva_all[k_conv] )
__a : Union[str, Any] = self.rate_weight * np.dot(__a , __a )
__a : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__a : Union[str, Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
__a : Any = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__a : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__a : Union[str, Any] = self.thre_bpa - pd_k_all * self.rate_thre
__a : str = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
__a : Tuple = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__a : int = rp + 1
__a : Tuple = error_count / patterns
all_mse.append(__a )
def draw_error():
__a : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__a , '+-' )
plt.plot(__a , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__a , alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------')
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Any = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__a )) )
for p in range(len(__a ) ):
__a : Optional[int] = np.asmatrix(datas_test[p] )
__a , __a : int = self.convolute(
__a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a : Optional[int] = self.pooling(__a , self.size_poolinga )
__a : List[str] = self._expand(__a )
__a : Dict = data_bp_input
__a : int = bp_outa * self.vji.T - self.thre_bpa
__a : Any = self.sig(__a )
__a : List[Any] = bp_outa * self.wkj.T - self.thre_bpa
__a : Optional[int] = self.sig(__a )
produce_out.extend(bp_outa.getA().tolist() )
__a : Dict = [list(map(self.do_round , __a ) ) for each in produce_out]
return np.asarray(__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = np.asmatrix(__a )
__a , __a : int = self.convolute(
__a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a : Tuple = self.pooling(__a , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
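# Standalone sketch (added) of the valid convolution implemented by the
# convolute method above: slide a kernel over the input with a given stride and
# apply a sigmoid to (window . kernel - threshold). conv2d_valid is a local
# helper written for illustration, not part of the class.
import numpy as np


def conv2d_valid(data, kernel, thre, step=1):
    size = (data.shape[0] - kernel.shape[0]) // step + 1
    out = np.zeros((size, size))
    for i in range(size):
        for j in range(size):
            window = data[i * step : i * step + kernel.shape[0],
                          j * step : j * step + kernel.shape[1]]
            out[i, j] = 1 / (1 + np.exp(-(np.sum(window * kernel) - thre)))
    return out


demo = conv2d_valid(np.arange(16.0).reshape(4, 4), np.ones((2, 2)), thre=0.5)
assert demo.shape == (3, 3)  # (4 - 2) / 1 + 1 = 3 positions per axis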
| 294 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
'''simple docstring'''
__a : int = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = seq_length
__a : List[str] = is_training
__a : Any = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Any = use_labels
__a : List[str] = vocab_size
__a : str = hidden_size
__a : List[str] = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Tuple = hidden_act
__a : Union[str, Any] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : Dict = type_vocab_size
__a : Any = type_sequence_label_size
__a : Dict = initializer_range
__a : Optional[Any] = num_labels
__a : Optional[Any] = num_choices
__a : Union[str, Any] = relative_attention
__a : List[str] = position_biased_input
__a : List[Any] = pos_att_type
__a : Tuple = scope
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[Any] = None
if self.use_input_mask:
__a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a : Any = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Optional[int] = None
__a : int = None
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__a : str = model(__a , token_type_ids=__a )[0]
__a : Optional[int] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : List[Any] = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
__a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__a : str = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : Dict = config_and_inputs
__a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = DebertaVaModelTester(self )
__a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : str = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a : int = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
__a : str = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 294 | 1 |
"""simple docstring"""
# Install cell injected at the top of every generated Korean-docs notebook.
# (The Korean comments read: "How to install Transformers" and "To install from
# source instead of the last release, comment the command above and uncomment
# the one below.")
INSTALL_CONTENT = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 61 |
"""simple docstring"""
# Function and parameter names below are reconstructed from the formula bodies;
# R = 0.0821 L*atm/(mol*K) is the gas constant used throughout.
def normality(moles, volume, nfactor):
    # gram-equivalents per litre: molarity (moles / volume) times the n-factor
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles, temperature, volume):
    # ideal gas law: P = nRT / V
    return round(float((moles * 0.0821 * temperature) / (volume)))


def volume_of_gas_system(moles, temperature, pressure):
    # ideal gas law rearranged: V = nRT / P
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def temperature_of_gas_system(pressure, volume, moles):
    # ideal gas law rearranged: T = PV / nR
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
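# Consistency check (added): with PV = nRT, recovering the volume from the
# computed pressure should return the starting value, up to the rounding the
# helpers above apply (2 mol at 300 K in 10 L gives roughly 5 atm).
p = pressure_of_gas_system(moles=2, temperature=300, volume=10)
assert volume_of_gas_system(moles=2, temperature=300, pressure=p) == 10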
| 61 | 1 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : List[str]=None , lowercase__ : str=None , lowercase__ : List[Any]=None , lowercase__ : Dict=None , lowercase__ : List[Any]=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
lowerCAmelCase_ :str = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase_ :List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase_ :Dict = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowercase__ )
if decoder_head_mask is None:
lowerCAmelCase_ :List[str] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase__ )
if cross_attn_head_mask is None:
lowerCAmelCase_ :Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=16 , __A=2 , __A=4 , __A=4 , __A="relu" , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.0 , __A=20 , __A=2 , __A=1 , __A=0 , ) -> int:
lowerCAmelCase_ :Union[str, Any] = parent
lowerCAmelCase_ :Tuple = batch_size
lowerCAmelCase_ :Optional[Any] = seq_length
lowerCAmelCase_ :str = is_training
lowerCAmelCase_ :str = use_labels
lowerCAmelCase_ :List[str] = vocab_size
lowerCAmelCase_ :Dict = hidden_size
lowerCAmelCase_ :Optional[Any] = num_hidden_layers
lowerCAmelCase_ :Any = num_attention_heads
lowerCAmelCase_ :Union[str, Any] = intermediate_size
lowerCAmelCase_ :Optional[int] = hidden_act
lowerCAmelCase_ :int = hidden_dropout_prob
lowerCAmelCase_ :Tuple = attention_probs_dropout_prob
lowerCAmelCase_ :Optional[Any] = encoder_layerdrop
lowerCAmelCase_ :Optional[int] = decoder_layerdrop
lowerCAmelCase_ :Dict = max_position_embeddings
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = pad_token_id
lowerCAmelCase_ :List[str] = bos_token_id
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :Tuple = self.eos_token_id # Eos Token
lowerCAmelCase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase_ :List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase_ :int = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase_ :List[Any] = self.get_config()
lowerCAmelCase_ :int = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def __lowerCAmelCase ( self ) -> int:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self , __A , __A ) -> Any:
lowerCAmelCase_ :str = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
lowerCAmelCase_ :Union[str, Any] = inputs_dict["""input_ids"""]
lowerCAmelCase_ :Optional[Any] = inputs_dict["""attention_mask"""]
lowerCAmelCase_ :Any = inputs_dict["""head_mask"""]
# first forward pass
lowerCAmelCase_ :int = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
lowerCAmelCase_ :List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase_ :str = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ :List[Any] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCAmelCase_ :List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ :str = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A )["""last_hidden_state"""]
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A , past_key_values=__A )[
"""last_hidden_state"""
]
# select random slice
lowerCAmelCase_ :Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ :int = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1E-2 ) )
def __lowerCAmelCase ( self , __A , __A ) -> int:
lowerCAmelCase_ :List[str] = MaMaaaModel(config=__A ).to(__A ).eval()
lowerCAmelCase_ :Dict = model(**__A )
lowerCAmelCase_ :Tuple = outputs.encoder_last_hidden_state
lowerCAmelCase_ :Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :Optional[int] = model.get_encoder()
encoder.save_pretrained(__A )
lowerCAmelCase_ :Union[str, Any] = MaMaaaEncoder.from_pretrained(__A ).to(__A )
lowerCAmelCase_ :List[Any] = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :Union[str, Any] = model.get_decoder()
decoder.save_pretrained(__A )
lowerCAmelCase_ :Any = MaMaaaDecoder.from_pretrained(__A ).to(__A )
lowerCAmelCase_ :Tuple = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Union[str, Any] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ :str = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase_ :Dict = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :Optional[Any] = True
UpperCAmelCase_ :Optional[int] = True
UpperCAmelCase_ :List[str] = False
UpperCAmelCase_ :Optional[Any] = False
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A ) -> Optional[int]:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :str = MaMaaaModelTester(self )
lowerCAmelCase_ :Optional[Any] = ConfigTester(self , config_class=__A )
def __lowerCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase_ :Union[str, Any] = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
lowerCAmelCase_ :Optional[Any] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["""missing_keys"""] , [] )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCAmelCase_ :Any = model_class(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
lowerCAmelCase_ :List[str] = inputs["""input_ids"""]
del inputs["input_ids"]
else:
lowerCAmelCase_ :Tuple = inputs["""input_ids"""]
lowerCAmelCase_ :Tuple = inputs.get("""decoder_input_ids""" , __A )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , __A )
lowerCAmelCase_ :Any = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCAmelCase_ :Optional[int] = wte(__A )
else:
lowerCAmelCase_ :Tuple = wte(__A )
lowerCAmelCase_ :List[str] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ :str = input_dict["""input_ids"""]
lowerCAmelCase_ :Optional[int] = input_ids.ne(1 ).to(__A )
lowerCAmelCase_ :int = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def _snake_case ( lowercase__ : int ) -> List[str]:
'''simple docstring'''
return torch.tensor(lowercase__ , dtype=torch.long , device=lowercase__ )
__UpperCAmelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    @cached_property
    def default_tokenizer( self ):
        return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
    def test_inference_no_head( self ):
        model = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(torch_device )
        input_ids = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
        decoder_input_ids = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 11, 1024) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ):
        model = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(torch_device )
        # change to intended input
        input_ids = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
        decoder_input_ids = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ):
        model = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(torch_device )
        tokenizer = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
        src_fr = [
            """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
            """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
            """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
            """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
            """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr , padding=True , return_tensors="""pt""" )
        hypotheses_batch = model.generate(
            input_ids=dct["""input_ids"""].to(torch_device ) , attention_mask=dct["""attention_mask"""].to(torch_device ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
        expected_en = [
            """The NSA case highlights the total absence of intelligence debate""",
            """I think there are two levels of response from the French government.""",
            """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
            """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
            """ communications in France.""",
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=True , skip_special_tokens=True )
        assert generated == expected_en
| 363 |
"""simple docstring"""
import itertools
import math
def is_prime ( number : int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1,
    # so only trial-divide by candidates of that shape up to sqrt(number)
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator ( ):
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( nth : int = 1_0_0_0_1 ) -> int:
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys ( flax_key_tuple, flax_tensor ):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
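# Hedged note (not in the original script): a flax Dense "kernel" is stored as
# (in_features, out_features), the transpose of torch.nn.Linear.weight's (out, in)
# layout, hence flax_tensor.T above; expert kernels are 3D (num_experts, in, out)
# and need the (0, 2, 1) permute instead.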
def get_key_and_tensorstore_dict ( layer, checkpoint_info, switch_checkpoint_path ):
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
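# Hedged note: each flattened checkpoint key is routed to either the tensorstore
# metadata, its kvstore entry, or a plain leaf value; the kvstore path and driver
# are rewritten so tensorstore can read the local checkpoint files directly.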
def rename_and_save_block ( current_block, save_path ):
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        # NOTE: the exact key rewrite below is reconstructed (the original expression
        # was lost); flax keys are "/"-joined above, PyTorch state dicts use ".".
        new_current_block[k.replace("/", "." )] = v
    current_block = new_current_block
    torch.save(current_block, save_path )
def shard_on_the_fly ( switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name = WEIGHTS_NAME ):
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/" )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/" ) ), raw_weights )
        key = "/".join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block, save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block, save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(".bin", f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename, os.path.join(dump_path, shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME ), "w", encoding="utf-8" ) as f:
        content = json.dumps(index, indent=2, sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
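# Hedged usage sketch (paths are hypothetical): called programmatically, the function
# returns the index metadata for the written shards, e.g.
#
#   metadata, index = shard_on_the_fly(
#       "/path/to/checkpoint_634600", "/path/to/output", "10GB", "bfloat16"
#   )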
def sanity_check ():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto" )
    tokenizer = T5Tokenizer.from_pretrained("t5-small" )
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt" ).input_ids
    out = model.generate(input_ids, decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 61 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file ( file, class_name, test_name, correct_line, done_test ):
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file, "r" ) as f:
        lines = f.readlines()
    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * " "}def {test_name}("""
    line_begin_regex = f"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * " "}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file, "w" ) as f:
        for line in new_lines:
            f.write(line )
def main ( correct_filename, fail_filename=None ):
    if fail_filename is not None:
        with open(fail_filename, "r" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 61 | 1 |
'''simple docstring'''
def get_set_bits_count (number: int ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(number , float ):
        raise TypeError('Input value must be a \'int\' type' )
    return bin(number ).count('1' )
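# Worked example (hedged): bin(13) == '0b1101', so get_set_bits_count(13) should be 3.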
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( _SCREAMING_SNAKE_CASE ):
def __init__( self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_12 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.0_2 , __magic_name__=False , __magic_name__=True , __magic_name__="None" , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ) -> Any:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = relative_attention
_a = position_biased_input
_a = pos_att_type
_a = scope
def __UpperCAmelCase ( self ) -> List[str]:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self ) -> Optional[Any]:
        config = self.get_config()
        config.vocab_size = 3_00
return config
def __UpperCAmelCase ( self , __magic_name__ ) -> Dict:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
_a = DebertaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )[0]
_a = model(__magic_name__ , token_type_ids=__magic_name__ )[0]
_a = model(__magic_name__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
_a = DebertaForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = self.num_labels
_a = DebertaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
_a = self.num_labels
_a = DebertaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = DebertaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __UpperCAmelCase ( self ) -> List[str]:
_a = DebertaModelTester(self )
_a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def __UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__magic_name__ )
def __UpperCAmelCase ( self ) -> str:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = DebertaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ) -> Dict:
pass
@slow
def __UpperCAmelCase ( self ) -> int:
_a = DebertaModel.from_pretrained('microsoft/deberta-base' )
_a = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a = model(__magic_name__ , attention_mask=__magic_name__ )[0]
# compare the actual values for a slice.
_a = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
| 104 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=0.9 , snake_case__=None , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] =size if size is not None else {"shortest_edge": 30}
UpperCAmelCase : int =crop_size if crop_size is not None else {"height": 30, "width": 30}
UpperCAmelCase : Optional[Any] =parent
UpperCAmelCase : Any =batch_size
UpperCAmelCase : int =num_channels
UpperCAmelCase : Any =min_resolution
UpperCAmelCase : Union[str, Any] =max_resolution
UpperCAmelCase : int =do_resize_and_center_crop
UpperCAmelCase : Union[str, Any] =size
UpperCAmelCase : List[str] =crop_pct
UpperCAmelCase : str =crop_size
UpperCAmelCase : List[str] =do_normalize
UpperCAmelCase : Dict =image_mean
UpperCAmelCase : Any =image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __snake_case ( A__ , unittest.TestCase ):
__lowerCamelCase : List[Any] = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : int =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''crop_pct''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Dict =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
UpperCAmelCase : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase : Optional[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase : Union[str, Any] =image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase : List[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase : Dict =image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase : List[str] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase : Dict =image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 348 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__( self, start, end, val, left=None, right=None ):
        """simple docstring"""
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self ):
        """simple docstring"""
        return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class SegmentTree:
    def __init__( self, collection: Sequence, function ):
        """simple docstring"""
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection ) - 1 )
    def update( self, i, val ):
        """simple docstring"""
        self._update_tree(self.root, i, val )
    def query_range( self, i, j ):
        """simple docstring"""
        return self._query_range(self.root, i, j )
    def _build_tree( self, start, end ):
        """simple docstring"""
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start, mid )
        right = self._build_tree(mid + 1, end )
        return SegmentTreeNode(start, end, self.fn(left.val, right.val ), left, right )
    def _update_tree( self, node, i, val ):
        """simple docstring"""
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val )
        else:
            self._update_tree(node.right, i, val )
        node.val = self.fn(node.left.val, node.right.val )
    def _query_range( self, node, i, j ):
        """simple docstring"""
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid ), self._query_range(node.right, node.mid + 1, j ), )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j )
    def traverse( self ):
        """simple docstring"""
        # breadth-first walk over the tree nodes
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
    import operator
    for fn in [operator.add, max, min]:
        print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()
        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()
        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 18 | 0 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = F"{sampling_rate}"
UpperCamelCase = "1"
UpperCamelCase = "f32le"
UpperCamelCase = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(_SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCamelCase = ffmpeg_process.communicate(_SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
UpperCamelCase = output_stream[0]
UpperCamelCase = np.frombuffer(_SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "f32le" , ):
"""simple docstring"""
UpperCamelCase = F"{sampling_rate}"
UpperCamelCase = "1"
if format_for_conversion == "s16le":
UpperCamelCase = 2
elif format_for_conversion == "f32le":
UpperCamelCase = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
UpperCamelCase = platform.system()
if system == "Linux":
UpperCamelCase = "alsa"
UpperCamelCase = "default"
elif system == "Darwin":
UpperCamelCase = "avfoundation"
UpperCamelCase = ":0"
elif system == "Windows":
UpperCamelCase = "dshow"
UpperCamelCase = "default"
UpperCamelCase = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
UpperCamelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCamelCase = _ffmpeg_stream(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
UpperCamelCase = stream_chunk_s
else:
UpperCamelCase = chunk_length_s
UpperCamelCase = ffmpeg_microphone(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , format_for_conversion=_SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
UpperCamelCase = np.intaa
UpperCamelCase = 2
elif format_for_conversion == "f32le":
UpperCamelCase = np.floataa
UpperCamelCase = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
UpperCamelCase = chunk_length_s / 6
UpperCamelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ):
UpperCamelCase = [stride_length_s, stride_length_s]
UpperCamelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCamelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCamelCase = datetime.datetime.now()
UpperCamelCase = datetime.timedelta(seconds=_SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=_SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
UpperCamelCase = np.frombuffer(item["raw"] , dtype=_SCREAMING_SNAKE_CASE )
UpperCamelCase = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
UpperCamelCase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
UpperCamelCase = B""
UpperCamelCase , UpperCamelCase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
UpperCamelCase = 0
for raw in iterator:
acc += raw
if stream and len(_SCREAMING_SNAKE_CASE ) < chunk_len:
UpperCamelCase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(_SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
UpperCamelCase = (_stride_left, stride_right)
UpperCamelCase = {"raw": acc[:chunk_len], "stride": stride}
if stream:
UpperCamelCase = False
yield item
UpperCamelCase = stride_left
UpperCamelCase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(_SCREAMING_SNAKE_CASE ) > stride_left:
UpperCamelCase = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
UpperCamelCase = False
yield item
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = 2**24 # 16Mo
try:
with subprocess.Popen(_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=_SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
UpperCamelCase = ffmpeg_process.stdout.read(_SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 359 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timesformer'''] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
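# Hedged note: _LazyModule defers the heavy torch import until an attribute is first
# accessed; the TYPE_CHECKING branch above exists only so static type checkers still
# see the real symbols.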
| 244 | 0 |
def decimal_to_fraction ( decimal: int | float | str ):
    """Convert a decimal (or numeric string) to a reduced (numerator, denominator) pair."""
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError("Please enter a valid number" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split("." )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        # Reduce with Euclid's algorithm: the loop computes gcd(numerator, denominator)
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(8_9.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 184 |
class MaxFenwickTree:
    """Fenwick tree (binary indexed tree) answering range-maximum queries with point updates."""
    def __init__( self , size : int ):
        '''simple docstring'''
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index : int ):
        '''simple docstring'''
        return index | (index + 1)
    @staticmethod
    def get_prev( index : int ):
        '''simple docstring'''
        return (index & (index + 1)) - 1
    def update( self , index : int , value : int ):
        '''simple docstring'''
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value , current_left_border , index )
            index = self.get_next(index )
    def query( self , left : int , right : int ):
        '''simple docstring'''
        right -= 1  # because the right bound is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
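# Hedged usage sketch: point updates with range-max queries (right bound exclusive).
#
#   ft = MaxFenwickTree(5)
#   ft.update(2, 7)
#   ft.update(4, 3)
#   assert ft.query(0, 5) == 7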
| 184 | 1 |
"""simple docstring"""
def is_power_of_two (number: int ):
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195 |
"""simple docstring"""
def binomial_coefficient (n , r ):
    '''simple docstring'''
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=1_0, r=5))
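# Sanity check (hedged): C(10, 5) == 252, so the line above should print 252.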
| 195 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def map (dataset: datasets.Dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.map(**kwargs )
@get_duration
def filter (dataset: datasets.Dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.filter(**kwargs )
def benchmark_map_filter ():
    """simple docstring"""
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples["""text"""] )
        # NOTE: the exact result-dict keys below are reconstructed and may differ in
        # wording from the original benchmark.
        times["""map identity"""] = map(dataset )
        times["""map identity batched"""] = map(dataset , batched=True )
        times["""map no-op batched"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""numpy""" ):
            times["""map no-op batched numpy"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["""map no-op batched pandas"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            times["""map no-op batched pytorch"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            times["""map no-op batched tensorflow"""] = map(dataset , function=lambda x : None , batched=True )
        times["""map fast-tokenizer batched"""] = map(dataset , function=tokenize , batched=True )
        times["""filter"""] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH , """wb""" ) as f:
            f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
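# Hedged note: get_duration (imported from the local utils module) is assumed to time
# the wrapped call and return the elapsed seconds, which is what ends up in the
# results JSON written above.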
| 64 |
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file ( file , class_name , test_name , correct_line , done_test ):
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , """r""" ) as f:
        lines = f.readlines()
    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * " "}def {test_name}("""
    line_begin_regex = f"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * " "}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , """w""" ) as f:
        for line in new_lines:
            f.write(line )
def main ( correct_filename , fail_filename=None ):
    if fail_filename is not None:
        with open(fail_filename , """r""" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename , """r""" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file , class_name , test_name , correct_line = line.split(""";""" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 341 | 0 |
import pprint
import requests
SCREAMING_SNAKE_CASE_:str = """https://zenquotes.io/api"""
def __UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Any = random_quotes()
pprint.pprint(response)
| 365 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tapas"""] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_tapas"""] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 115 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("""T""")
class SegmentTree ( Generic[T] ):
    """simple docstring"""
    def __init__( self , arr , fnc ) -> None:
        any_type: Any | T = None
        self.N = len(arr )
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ) -> None:
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p , v ) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l , r ) -> T | None:  # noqa: E741
        # iterative segment-tree walk: move the two boundaries up, folding in a
        # node whenever it fully lies inside the query range
        l , r = l + self.N , r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2 , (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce
    test_array = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -1_4,
        4: 5,
        5: 4,
        6: 7,
        7: -1_0,
        8: 9,
        9: 1_0,
        1_0: 1_2,
        1_1: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments():
        """simple docstring"""
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 81 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : List[str] = """deformable_detr"""
a__ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __lowercase=True , __lowercase=None , __lowercase=3 , __lowercase=300 , __lowercase=1_024 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=0.0 , __lowercase=True , __lowercase="relu" , __lowercase=256 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , __lowercase=True , __lowercase=False , __lowercase="sine" , __lowercase="resnet50" , __lowercase=True , __lowercase=False , __lowercase=4 , __lowercase=4 , __lowercase=4 , __lowercase=False , __lowercase=300 , __lowercase=False , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=1 , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=0.1 , __lowercase=0.25 , __lowercase=False , **__lowercase , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
__UpperCamelCase :str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__lowercase , __lowercase):
__UpperCamelCase :str = backbone_config.get('''model_type''')
__UpperCamelCase :Tuple = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase :Any = config_class.from_dict(__lowercase)
__UpperCamelCase :int = use_timm_backbone
__UpperCamelCase :Dict = backbone_config
__UpperCamelCase :Any = num_channels
__UpperCamelCase :Optional[int] = num_queries
__UpperCamelCase :Any = max_position_embeddings
__UpperCamelCase :str = d_model
__UpperCamelCase :Tuple = encoder_ffn_dim
__UpperCamelCase :Union[str, Any] = encoder_layers
__UpperCamelCase :List[Any] = encoder_attention_heads
__UpperCamelCase :Any = decoder_ffn_dim
__UpperCamelCase :List[str] = decoder_layers
__UpperCamelCase :int = decoder_attention_heads
__UpperCamelCase :str = dropout
__UpperCamelCase :Any = attention_dropout
__UpperCamelCase :int = activation_dropout
__UpperCamelCase :List[Any] = activation_function
__UpperCamelCase :List[Any] = init_std
__UpperCamelCase :List[Any] = init_xavier_std
__UpperCamelCase :int = encoder_layerdrop
__UpperCamelCase :str = auxiliary_loss
__UpperCamelCase :Optional[Any] = position_embedding_type
__UpperCamelCase :Union[str, Any] = backbone
__UpperCamelCase :Any = use_pretrained_backbone
__UpperCamelCase :str = dilation
# deformable attributes
__UpperCamelCase :Optional[Any] = num_feature_levels
__UpperCamelCase :str = encoder_n_points
__UpperCamelCase :int = decoder_n_points
__UpperCamelCase :Union[str, Any] = two_stage
__UpperCamelCase :Optional[Any] = two_stage_num_proposals
__UpperCamelCase :Dict = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''')
# Hungarian matcher
__UpperCamelCase :Optional[int] = class_cost
__UpperCamelCase :List[Any] = bbox_cost
__UpperCamelCase :str = giou_cost
# Loss coefficients
__UpperCamelCase :Tuple = mask_loss_coefficient
__UpperCamelCase :Tuple = dice_loss_coefficient
__UpperCamelCase :int = bbox_loss_coefficient
__UpperCamelCase :Any = giou_loss_coefficient
__UpperCamelCase :Dict = eos_coefficient
__UpperCamelCase :Optional[Any] = focal_alpha
__UpperCamelCase :Optional[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=__lowercase , **__lowercase)
@property
def UpperCamelCase__ ( self) -> int:
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self) -> int:
return self.d_model
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Dict = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__UpperCamelCase :Tuple = self.backbone_config.to_dict()
__UpperCamelCase :List[Any] = self.__class__.model_type
return output
| 43 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
            encoder_hidden_states, encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 250 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_split():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 250 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : str = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66,
        attention_type="block_sparse", use_bias=True, rescale_embeddings=False,
        block_size=64, num_random_blocks=3, classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id,
            eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
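# Usage sketch: build a default block-sparse config and inspect it.
# config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
# config.model_type  # -> "big_bird"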
| 103 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
    ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
    ('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
    ('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
    ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
    ('''JH AH TH KH QH''', 23),
    ('''JH 9H TH KH QH''', 22),
    ('''JC KH JS JD JH''', 21),
    ('''KH KC 3S 3H 3D''', 20),
    ('''8C 9C 5C 3C TC''', 19),
    ('''JS QS 9H TS KH''', 18),
    ('''7C 7S KH 2H 7H''', 17),
    ('''3C KH 5D 5S KH''', 16),
    ('''QH 8H KD JH 8S''', 15),
    ('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 279 | 0 |
"""simple docstring"""
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}
speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    '''simple docstring'''
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
            f"""Valid values are: {", ".join(speed_chart_inverse)}"""
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
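if __name__ == "__main__":
    # Worked examples (a sketch; values follow the conversion charts above):
    print(convert_speed(100, "km/h", "m/s"))  # 27.778
    print(convert_speed(100, "km/h", "mph"))  # 62.137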
| 133 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(computed_tokens, TARGET_TOKENS)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 133 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
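# Usage sketch (the checkpoint id and `waveform` are illustrative assumptions):
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16000, text="a transcript")
# inputs["input_features"], inputs["labels"]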
| 72 |
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
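# Example (sketch): composing a dataset schema from these re-exports.
# from datasets import ClassLabel, Features, Value
# features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})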
| 111 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
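if __name__ == "__main__":
    # Worked example (sketch): reactance of a 35 mH inductor at 1 kHz.
    print(ind_reactance(35e-3, 1e3, 0))  # {'reactance': 219.911...}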
| 365 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
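# CLI sketch (the script filename is an assumption; the flags come from the parser above):
# python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#     --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt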
| 313 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
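# Usage sketch (the checkpoint id is an illustrative assumption):
# feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")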
| 11 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # calculate loss and update w to minimize it
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
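# Quick reference (sketch) for the schedule API exercised above; `model` is an assumption:
# optimizer = AdamW(model.parameters(), lr=10.0)
# scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
# for _ in range(10):
#     optimizer.step()
#     scheduler.step()  # lr ramps up for 2 steps, then decays linearly toward 0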
| 13 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
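if __name__ == "__main__":
    # Worked example (sketch): a 10 mH inductor with a 1 uF capacitor.
    print(resonant_frequency(10e-3, 1e-6))  # ('Resonant frequency', 1591.549...)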
| 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
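# Migration sketch: prefer the image processor class going forward.
# from transformers import CLIPImageProcessor
# image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")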
| 1 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __magic_name__ ( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, unittest.TestCase):
UpperCamelCase__ = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''})
UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
torch.manual_seed(0 )
lowercase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowercase_ : str = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowercase_ : Tuple = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowercase_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase_ : str = CLIPTextModel(__a )
lowercase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ : Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[Any]=0 ):
if str(__a ).startswith("""mps""" ):
lowercase_ : Any = torch.manual_seed(__a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowercase_ : Union[str, Any] = 2
lowercase_ : List[Any] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , )
lowercase_ : Dict = floats_tensor(control_image.shape , rng=random.Random(__a ) ).to(__a )
lowercase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ : Optional[Any] = Image.fromarray(np.uint8(__a ) ).convert("""RGB""" ).resize((64, 64) )
lowercase_ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __magic_name__ ( UpperCamelCase__, UpperCamelCase__, unittest.TestCase):
UpperCamelCase__ = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase__ = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
torch.manual_seed(0 )
lowercase_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m : torch.nn.Module ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
lowercase_ : Optional[int] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__a )
torch.manual_seed(0 )
lowercase_ : Optional[int] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__a )
torch.manual_seed(0 )
lowercase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowercase_ : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase_ : Dict = CLIPTextModel(__a )
lowercase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ : Tuple = MultiControlNetModel([controlneta, controlneta] )
lowercase_ : Dict = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : int , lowercase_ : str=0 ):
if str(__a ).startswith("""mps""" ):
lowercase_ : Dict = torch.manual_seed(__a )
else:
lowercase_ : Any = torch.Generator(device=__a ).manual_seed(__a )
lowercase_ : Optional[Any] = 2
lowercase_ : List[Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , ),
]
lowercase_ : Any = floats_tensor(control_image[0].shape , rng=random.Random(__a ) ).to(__a )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ : Dict = Image.fromarray(np.uint8(__a ) ).convert("""RGB""" ).resize((64, 64) )
lowercase_ : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.get_dummy_components()
lowercase_ : Union[str, Any] = self.pipeline_class(**__a )
pipe.to(__a )
lowercase_ : List[Any] = 10.0
lowercase_ : List[Any] = 4
lowercase_ : int = self.get_dummy_inputs(__a )
lowercase_ : Optional[int] = steps
lowercase_ : str = scale
lowercase_ : Any = pipe(**__a )[0]
lowercase_ : int = self.get_dummy_inputs(__a )
lowercase_ : Optional[int] = steps
lowercase_ : List[str] = scale
lowercase_ : Union[str, Any] = pipe(**__a , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowercase_ : Tuple = self.get_dummy_inputs(__a )
lowercase_ : List[Any] = steps
lowercase_ : Tuple = scale
lowercase_ : str = pipe(**__a , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowercase_ : int = self.get_dummy_inputs(__a )
lowercase_ : int = steps
lowercase_ : Union[str, Any] = scale
lowercase_ : Optional[int] = pipe(**__a , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = self.get_dummy_components()
lowercase_ : str = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__a )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Tuple = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
lowercase_ : str = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__a , controlnet=__a )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__a )
lowercase_ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ : str = """evil space-punk bird"""
lowercase_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowercase_ : Optional[int] = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowercase_ : Optional[int] = pipe(
__a , __a , control_image=__a , generator=__a , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowercase_ : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
lowercase_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 239 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : int , *__a : Dict , **__a : str ):
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 1 | 0 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : list ):
"""simple docstring"""
lowerCamelCase__ : Dict =len(__lowerCamelCase )
for _ in range(__lowerCamelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
_lowercase : Union[str, Any] = list(range(1_0, 0, -1))
print(f'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 368 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def snake_case__ ( __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int , __lowerCamelCase : float = 1 , __lowerCamelCase : float = 1 , __lowerCamelCase : float = 1.0e4 , __lowerCamelCase : bool = False , __lowerCamelCase : float = 1.0 , ):
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
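    # Build geometrically spaced timescales; half of the embedding gets sin, the
    # other half cos (the order is controlled by `flip_sin_to_cos` below).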
lowerCamelCase__ : Any =float(embedding_dim // 2 )
lowerCamelCase__ : List[str] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowerCamelCase__ : int =min_timescale * jnp.exp(jnp.arange(__lowerCamelCase , dtype=jnp.floataa ) * -log_timescale_increment )
lowerCamelCase__ : Tuple =jnp.expand_dims(__lowerCamelCase , 1 ) * jnp.expand_dims(__lowerCamelCase , 0 )
# scale embeddings
lowerCamelCase__ : List[str] =scale * emb
if flip_sin_to_cos:
lowerCamelCase__ : int =jnp.concatenate([jnp.cos(__lowerCamelCase ), jnp.sin(__lowerCamelCase )] , axis=1 )
else:
lowerCamelCase__ : List[str] =jnp.concatenate([jnp.sin(__lowerCamelCase ), jnp.cos(__lowerCamelCase )] , axis=1 )
lowerCamelCase__ : str =jnp.reshape(__lowerCamelCase , [jnp.shape(__lowerCamelCase )[0], embedding_dim] )
return signal
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = jnp.floataa
@nn.compact
def __call__( self : Optional[Any], lowerCamelCase : int )-> Any:
lowerCamelCase__ : Optional[Any] =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_1''' )(lowerCamelCase )
lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
lowerCamelCase__ : Any =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_2''' )(lowerCamelCase )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = False
_a = 1
@nn.compact
def __call__( self : Any, lowerCamelCase : int )-> int:
return get_sinusoidal_embeddings(
lowerCamelCase, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift )
| 272 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase_ ( __lowercase : str , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : str , __lowercase : List[Any] ) -> Dict:
'''simple docstring'''
with open(_lowercase ) as metadata_file:
_UpperCAmelCase = json.load(_lowercase )
_UpperCAmelCase = LukeConfig(use_entity_aware_attention=_lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase = torch.load(_lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCAmelCase = load_original_entity_vocab(_lowercase )
# add an entry for [MASK2]
_UpperCAmelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase = AddedToken("<ent>" , lstrip=_lowercase , rstrip=_lowercase )
_UpperCAmelCase = AddedToken("<ent2>" , lstrip=_lowercase , rstrip=_lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(_lowercase )
with open(os.path.join(_lowercase , "tokenizer_config.json" ) , "r" ) as f:
_UpperCAmelCase = json.load(_lowercase )
_UpperCAmelCase = "MLukeTokenizer"
with open(os.path.join(_lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(_lowercase , _lowercase )
with open(os.path.join(_lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(_lowercase , _lowercase )
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_lowercase )
# Initialize the embeddings of the special tokens
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCAmelCase = state_dict["embeddings.word_embeddings.weight"]
_UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCAmelCase = state_dict[bias_name]
_UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCAmelCase = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCAmelCase = state_dict["entity_predictions.bias"]
_UpperCAmelCase = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCAmelCase = LukeForMaskedLM(config=_lowercase ).eval()
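    # Drop decoder weights that are tied to the embeddings; tie_weights() below re-creates them.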
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCAmelCase = state_dict[key]
else:
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_lowercase , strict=_lowercase )
if set(_lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(_lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_lowercase , task="entity_classification" )
_UpperCAmelCase = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCAmelCase = (0, 9)
_UpperCAmelCase = tokenizer(_lowercase , entity_spans=[span] , return_tensors="pt" )
_UpperCAmelCase = model(**_lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 33, 768) )
_UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowercase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 1, 768) )
_UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowercase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_lowercase )
_UpperCAmelCase = "Tokyo is the capital of <mask>."
_UpperCAmelCase = (24, 30)
_UpperCAmelCase = tokenizer(_lowercase , entity_spans=[span] , return_tensors="pt" )
_UpperCAmelCase = model(**_lowercase )
_UpperCAmelCase = encoding["input_ids"][0].tolist()
_UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_lowercase )
_UpperCAmelCase = outputs.entity_logits[0][0].argmax().item()
_UpperCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(_lowercase ) )
model.save_pretrained(_lowercase )
def UpperCAmelCase_ ( __lowercase : str ) -> Any:
'''simple docstring'''
_UpperCAmelCase = ["[MASK]", "[PAD]", "[UNK]"]
_UpperCAmelCase = [json.loads(_lowercase ) for line in open(_lowercase )]
_UpperCAmelCase = {}
for entry in data:
_UpperCAmelCase = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCAmelCase = entity_id
break
_UpperCAmelCase = f'{language}:{entity_name}'
_UpperCAmelCase = entity_id
return new_mapping
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__SCREAMING_SNAKE_CASE :Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 22 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 310 | 0 |
import socket
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
A : int = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
A : Dict = socket.gethostname()
A : int = 1_2312
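    # Connect to a local server and stream the incoming file to disk in 1 KiB chunks.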
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
A : Union[str, Any] = sock.recv(1024 )
if not data:
break
            out_file.write(data )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
| 361 |
import socket
def __UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
A : str = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
A : Union[str, Any] = socket.gethostname()
A : Dict = 1_2312
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
A : Optional[int] = sock.recv(1024 )
if not data:
break
            out_file.write(data )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
| 115 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='ernie_m'
lowerCamelCase__ ={"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__(self , a_ = 25_00_02 , a_ = 7_68 , a_ = 12 , a_ = 12 , a_ = 30_72 , a_ = "gelu" , a_ = 0.1 , a_ = 0.1 , a_ = 5_14 , a_ = 0.02 , a_ = 1 , a_ = 1E-05 , a_=None , a_=False , a_=0.0 , **a_ , ):
'''simple docstring'''
super().__init__(pad_token_id=a_ , **a_ )
__snake_case : Union[str, Any] = vocab_size
__snake_case : Optional[Any] = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : int = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Optional[Any] = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : str = classifier_dropout
__snake_case : Optional[int] = is_decoder
__snake_case : Optional[int] = act_dropout
| 102 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : str = """▁"""
SCREAMING_SNAKE_CASE : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =BigBirdTokenizer
lowerCamelCase__ =BigBirdTokenizerFast
lowerCamelCase__ =True
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().setUp()
__snake_case : List[Any] = self.tokenizer_class(a_ , keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = '''<s>'''
__snake_case : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(a_ ) , 10_04 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_rust_tokenizer()
__snake_case : Dict = '''I was born in 92000, and this is falsé.'''
__snake_case : int = tokenizer.tokenize(a_ )
__snake_case : str = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
__snake_case : Tuple = tokenizer.encode(a_ , add_special_tokens=a_ )
__snake_case : Tuple = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
__snake_case : Optional[Any] = self.get_rust_tokenizer()
__snake_case : Optional[int] = tokenizer.encode(a_ )
__snake_case : Dict = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = BigBirdTokenizer(a_ , keep_accents=a_ )
__snake_case : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [2_85, 46, 10, 1_70, 3_82] , )
__snake_case : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case : Tuple = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = '''Hello World!'''
__snake_case : List[Any] = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__snake_case : Optional[int] = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
__snake_case : Tuple = ''' '''.join(a_ )
__snake_case : Tuple = self.big_tokenizer.encode_plus(a_ , return_tensors='''pt''' , return_token_type_ids=a_ )
__snake_case : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=a_ )
__snake_case : Optional[int] = BigBirdConfig(attention_type='''original_full''' )
__snake_case : str = BigBirdModel(a_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a_ )
model(**a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__snake_case : Any = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 102 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "mvp"
UpperCAmelCase__ : Optional[int] = ["past_key_values"]
UpperCAmelCase__ : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _a=5_0_2_6_7 , _a=1_0_2_4 , _a=1_2 , _a=4_0_9_6 , _a=1_6 , _a=1_2 , _a=4_0_9_6 , _a=1_6 , _a=0.0 , _a=0.0 , _a="gelu" , _a=1_0_2_4 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=0.0 , _a=False , _a=True , _a=1 , _a=0 , _a=2 , _a=True , _a=2 , _a=2 , _a=False , _a=1_0_0 , _a=8_0_0 , **_a , ) -> List[str]:
_a : Any = vocab_size
_a : Optional[int] = max_position_embeddings
_a : str = d_model
_a : Union[str, Any] = encoder_ffn_dim
_a : Tuple = encoder_layers
_a : str = encoder_attention_heads
_a : List[str] = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Union[str, Any] = decoder_attention_heads
_a : List[str] = dropout
_a : Any = attention_dropout
_a : str = activation_dropout
_a : List[str] = activation_function
_a : Dict = init_std
_a : List[Any] = encoder_layerdrop
_a : List[str] = decoder_layerdrop
_a : List[Any] = classifier_dropout
_a : int = use_cache
_a : Optional[int] = encoder_layers
_a : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_a : List[str] = use_prompt
_a : Optional[Any] = prompt_length
_a : int = prompt_mid_dim
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , **_a , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , _a ):
_a : List[str] = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
a =TypeVar("""T""")
class A_ ( Generic[T] ):
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : list[T] ,SCREAMING_SNAKE_CASE__ : Callable[[T, T], T]):
__lowerCamelCase : Any | T = None
__lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : list[T] = [any_type for _ in range(self.N)] + arr
__lowerCamelCase : Optional[int] = fnc
self.build()
def lowerCAmelCase ( self : Dict):
for p in range(self.N - 1 ,0 ,-1):
__lowerCamelCase : List[str] = self.fn(self.st[p * 2] ,self.st[p * 2 + 1])
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : T):
p += self.N
__lowerCamelCase : Dict = v
while p > 1:
__lowerCamelCase : List[Any] = p // 2
__lowerCamelCase : List[Any] = self.fn(self.st[p * 2] ,self.st[p * 2 + 1])
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int): # noqa: E741
__lowerCamelCase , __lowerCamelCase : Dict = l + self.N, r + self.N
__lowerCamelCase : T | None = None
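        # Climb from the leaves: fold in a boundary node whenever l is a right
        # child or r is a left child, then move both pointers up one level.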
while l <= r:
if l % 2 == 1:
__lowerCamelCase : Dict = self.st[l] if res is None else self.fn(SCREAMING_SNAKE_CASE__ ,self.st[l])
if r % 2 == 0:
__lowerCamelCase : str = self.st[r] if res is None else self.fn(SCREAMING_SNAKE_CASE__ ,self.st[r])
__lowerCamelCase , __lowerCamelCase : Any = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
a =[1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
a ={
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
a =SegmentTree(test_array, min)
a =SegmentTree(test_array, max)
a =SegmentTree(test_array, lambda a, b: a + b)
def SCREAMING_SNAKE_CASE__ ( ) -> None:
for i in range(len(lowerCamelCase__ ) ):
for j in range(lowerCamelCase__ , len(lowerCamelCase__ ) ):
__lowerCamelCase : Any = reduce(lowerCamelCase__ , test_array[i : j + 1] )
__lowerCamelCase : Union[str, Any] = reduce(lowerCamelCase__ , test_array[i : j + 1] )
__lowerCamelCase : Dict = reduce(lambda lowerCamelCase__ , lowerCamelCase__ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(lowerCamelCase__ , lowerCamelCase__ )
assert max_range == max_segment_tree.query(lowerCamelCase__ , lowerCamelCase__ )
assert sum_range == sum_segment_tree.query(lowerCamelCase__ , lowerCamelCase__ )
test_all_segments()
for index, value in test_updates.items():
a =value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 73 |
'''simple docstring'''
A__: Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 276 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowerCAmelCase__ :Any = None
lowerCAmelCase__ :List[str] = logging.get_logger(__name__)
lowerCAmelCase__ :Any = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ :Any = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ :Dict = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
lowerCAmelCase__ :Union[str, Any] = '''▁'''
# Segments (not really needed)
lowerCAmelCase__ :int = 0
lowerCAmelCase__ :Optional[Any] = 1
lowerCAmelCase__ :List[str] = 2
lowerCAmelCase__ :Optional[int] = 3
lowerCAmelCase__ :Optional[int] = 4
class __a ( UpperCAmelCase ):
_a : int = VOCAB_FILES_NAMES
_a : str = PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Optional[int] = 'left'
_a : Optional[Any] = XLNetTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<sep>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=["<eop>", "<eod>"] , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 185 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __a ( UpperCAmelCase ):
_a : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 185 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | '''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] # remove the leading "0b"
UpperCAmelCase_ = str(bin(snake_case_ ) )[2:]
UpperCAmelCase_ = max(len(snake_case_ ) , len(snake_case_ ) )
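    # Zero-pad both operands to the same width and OR them bit by bit.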
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(snake_case_ ) , b_binary.zfill(snake_case_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[Any] ) -> str:
if isinstance(A__ , torch.Tensor ):
return image
elif isinstance(A__ , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
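            # Resize, stack to NHWC, scale to [0, 1], move channels first, then map to [-1, 1].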
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(A__ , axis=0 )
__snake_case = np.array(A__ ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(A__ )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(A__ , dim=0 )
return image
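# Spherical linear interpolation between two vectors separated by angle theta:
#   slerp(t, v0, v1) = sin((1 - t) * theta) / sin(theta) * v0 + sin(t * theta) / sin(theta) * v1
# with a plain linear interpolation fallback when the vectors are almost parallel.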
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
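# Squared great-circle distance between L2-normalized embeddings, used as the CLIP
# guidance loss: 2 * arcsin(|x - y| / 2) ** 2.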
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
@torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 357 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
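# The slow tests below replicate the model params across devices and shard the inputs,
# so the pipeline's pmapped path (jit=True) generates one sample per device.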
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 238 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to a same-named file in `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
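# Fire exposes minify as a CLI below, e.g.: python <this_file> SRC_DIR DEST_DIR 100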
if __name__ == "__main__":
fire.Fire(minify)
| 64 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
@torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 132 | 0 |
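# Project Euler 40: concatenating the positive integers gives Champernowne's constant
# 0.123456789101112...; the answer is the product of the digits at positions
# 1, 10, 100, 1_000, 10_000, 100_000 and 1_000_000.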
def solution():
    """Return the product of the selected digits of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 10 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
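# In Flax generation, logits *processors* enforce hard constraints (min length,
# forced BOS/EOS tokens) while *warpers* reshape the next-token distribution
# (temperature, top-k, top-p); both share the (input_ids, scores, cur_len) call form.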
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 300 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 60 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a ChineseCLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 210 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 210 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 34 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily select a maximum-size set of mutually compatible activities,
    assuming the activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
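# Added note: with the activities pre-sorted by finish time (as in the demo data above),
# the greedy scan is O(n); sorting unsorted input first would make it O(n log n) overall.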
| 65 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
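# Example (added): check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)]) -> True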
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no-sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image_upscaler(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 371 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 285 | 0 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 98 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
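# Illustrative usage sketch (added; the toy data below is made up for demonstration):
#     svc = SVC(kernel="linear")
#     svc.fit([np.array([1.0, 1.0]), np.array([-1.0, -1.0])], np.array([1.0, -1.0]))
#     svc.predict(np.array([2.0, 2.0]))  # -> 1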
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 98 | 1 |
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph (given as an adjacency dict) contains a cycle."""
    # Keep track of visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
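# Minimal usage sketch (added for illustration; the adjacency lists are made up):
#     check_cycle({0: [1], 1: [2], 2: [0]})  # -> True (cycle 0 -> 1 -> 2 -> 0)
#     check_cycle({0: [1], 1: [2], 2: []})   # -> False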
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 365 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
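# Added note: swapping sys.modules[__name__] for a _LazyModule defers the heavy framework
# imports declared above until one of the exported names (e.g. MBartModel) is first accessed.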
| 174 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 316 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316 | 1 |
__all__ = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
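# Added note: `__all__` pins the public surface of this download package, so a wildcard
# import only exposes the four names re-exported above.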
| 368 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
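# Added note: the n-th hexagonal number is h(n) = n * (2n - 1); this 0-indexed variant
# therefore returns hexagonal_numbers(4) == [0, 1, 6, 15].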
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 167 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 33 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
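# Worked example (added for illustration): for gold "the cat sat" and prediction
# "cat sat on", normalization drops the article, giving tokens ["cat", "sat"] vs
# ["cat", "sat", "on"]: precision = 2/3, recall = 2/2, F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.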
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 270 | 0 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
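# Added note: `create_inputs` and `output_types` are complementary halves of the mixin
# below: one fabricates a dummy argument per declared input type, the other maps real
# outputs back to type names so they can be compared against `tool.outputs`.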
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 357 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir , "**" )
        dataset = load_dataset(
            "imagefolder" , data_files=data_files , cache_dir=model_args.cache_dir , task="image-classification" , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split )
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels) , label2id=label2id , id2label=id2label , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ] )
    def train_transforms(example_batch):
        # Apply the training transforms across a batch of PIL images.
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch):
        # Apply the evaluation transforms across a batch of PIL images.
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
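# Usage sketch (illustrative only; the dataset name and output path below are
# placeholders, not part of the original file):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-beans \
#       --do_train --do_eval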
| 322 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowercase__ :
    def __init__( self , num_of_nodes : int ):
        self.m_num_of_nodes : int = num_of_nodes
        self.m_edges : list[list[int]] = []
        self.m_component : dict[int, int] = {}
    def add_edge( self , u_node : int , v_node : int , weight : int ):
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node : int ):
        # Follow parent pointers until a node that is its own component root.
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node : int ):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size : list[int] , u_node : int , v_node : int ):
        # Merge the smaller component into the larger one.
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ):
        component_size = []
        mst_weight = 0
        minimum_weight_edge : list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def _snake_case ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
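# Usage sketch (illustrative, using the class defined above, here still named
# ``lowercase__``): Borůvka's algorithm on a small triangle graph.
#
#   g = lowercase__(3)
#   g.add_edge(0, 1, 5)
#   g.add_edge(1, 2, 1)
#   g.add_edge(0, 2, 3)
#   g.boruvka()  # prints the added edges and a total MST weight of 4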
| 144 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _snake_case ( lowerCamelCase__ : str ) -> str:
    lowerCamelCase__ = re.sub("<n>" , "" , lowerCamelCase__ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowerCamelCase__ ) )
| 144 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mmbt'''] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
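# Usage sketch (illustrative; the exact module path depends on the installed
# transformers version): the lazy module resolves attributes on first access, so
# importing the config does not pull in the torch-backed model classes.
#
#   from transformers.models.mmbt import MMBTConfig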
| 52 |
'''simple docstring'''
from math import sqrt
def is_prime ( number ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def sieve_er ( n ):
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def get_prime_numbers ( n ):
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def prime_factorization ( number ):
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def greatest_prime_factor ( number ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def smallest_prime_factor ( number ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def is_even ( number ):
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare bust been from type bool"
    return number % 2 == 0
def is_odd ( number ):
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare bust been from type bool"
    return number % 2 != 0
def goldbach ( number ):
    assert (
        isinstance(number , int ) and (number > 2) and is_even(number )
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd ( number1 , number2 ):
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1 , int ) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v ( number1 , number2 ):
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1 )
        prime_fac_2 = prime_factorization(number2 )
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1 , number2 )
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n )
                count2 = prime_fac_2.count(n )
                for _ in range(max(count1 , count2 ) ):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n )
                for _ in range(count1 ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n )
            for _ in range(count2 ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime ( n ):
    assert isinstance(n , int ) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between ( p_number_1 , p_number_2 ):
    assert (
        is_prime(p_number_1 ) and is_prime(p_number_2 ) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_2:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_1
        and ans[len(ans ) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors ( n ):
    assert isinstance(n , int ) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1 , n + 1 ):
        if n % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number ( number ):
    assert isinstance(number , int ) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number )
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
def simplify_fraction ( numerator , denominator ):
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial ( n ):
    assert isinstance(n , int ) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans
def fib ( n ):
    assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1 ):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
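# A few usage sketches (illustrative, relying only on the functions above):
#
#   is_prime(97)                 # -> True
#   prime_factorization(912)     # -> [2, 2, 2, 2, 3, 19]
#   goldbach(28)                 # -> [5, 23]
#   kg_v(8, 10)                  # -> 40
#   simplify_fraction(10, 20)    # -> (1, 2)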
| 52 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def download_images_from_google_query ( query : str = "dhaka" , max_images : int = 5 ):
    max_images = min(max_images , 50 )  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }
    html = requests.get('https://www.google.com/search' , params=params , headers=headers )
    soup = BeautifulSoup(html.text , 'html.parser' )
    matched_images_data = ''.join(
        re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
    matched_images_data_fix = json.dumps(matched_images_data )
    matched_images_data_json = json.loads(matched_images_data_fix )
    matched_google_image_data = re.findall(
        R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , matched_images_data_json , )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(matched_google_image_data ) , )
    matched_google_full_resolution_images = re.findall(
        R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , removed_matched_google_images_thumbnails , )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images ):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image , 'ascii' ).decode(
            'unicode-escape' )
        original_size_img = bytes(original_size_img_not_fixed , 'ascii' ).decode(
            'unicode-escape' )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(opener )
        path_name = F"query_{query.replace(' ' , '_' )}"
        if not os.path.exists(path_name ):
            os.makedirs(path_name )
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img , F"{path_name}/original_size_img_{index}.jpg" )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'{image_count} images were downloaded to disk.')
except IndexError:
print('Please provide a search term.')
raise
| 29 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env ( key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)
def require_faiss ( test_case ):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss' )(test_case )
    return test_case
def require_regex ( test_case ):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex' )(test_case )
    return test_case
def require_elasticsearch ( test_case ):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch' )(test_case )
    return test_case
def require_sqlalchemy ( test_case ):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy' )(test_case )
    return test_case
def require_torch ( test_case ):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch' )(test_case )
    return test_case
def require_tf ( test_case ):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow' )(test_case )
    return test_case
def require_jax ( test_case ):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX' )(test_case )
    return test_case
def require_pil ( test_case ):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow' )(test_case )
    return test_case
def require_transformers ( test_case ):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers' )(test_case )
    else:
        return test_case
def require_tiktoken ( test_case ):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken' )(test_case )
    else:
        return test_case
def require_spacy ( test_case ):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy' )(test_case )
    else:
        return test_case
def require_spacy_model ( model ):
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401
            spacy.load(model )
        except ImportError:
            return unittest.skip('test requires spacy' )(test_case )
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def require_pyspark ( test_case ):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark' )(test_case )
    else:
        return test_case
def require_joblibspark ( test_case ):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark' )(test_case )
    else:
        return test_case
def slow ( test_case ):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow' )(test_case )
    return test_case
def local ( test_case ):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local' )(test_case )
    return test_case
def packaged ( test_case ):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged' )(test_case )
    return test_case
def remote ( test_case ):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote' )(test_case )
    return test_case
def for_all_test_methods ( *decorators ):
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith('test' ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError ( Exception ):
    pass
class OfflineSimulationMode ( Enum ):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline ( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1e-16 ):
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        kwargs['timeout'] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]''' ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError('Offline mode is enabled.' , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send' , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request' , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE' , True ):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
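# Usage sketch (illustrative): assert that network access fails while the
# simulation is active.
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       requests.get("https://huggingface.co")  # raises requests.ConnectionError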
@contextmanager
def set_current_working_directory_to_temp_dir ( *args , **kwargs ):
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def assert_arrow_memory_increases ():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase ():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal ( rng1 , rng2 ):
    return deepcopy(rng1 ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 100 , 10 ).tolist()
def xfail_if_500_502 ( func ):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith('500' ) or str(err ).startswith('502' ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
class _RunOutput :
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream ( stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='stdout:' ) ),
            _read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='stderr:' ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' )
    return result
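# Usage sketch (illustrative): run a worker command and inspect its captured output.
#
#   result = execute_subprocess_async(["python", "-c", "print('ok')"], timeout=60)
#   assert "ok" in result.stdout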
def pytest_xdist_worker_id ():
    worker = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
    worker = re.sub(R'^gw' , '' , worker , 0 , re.M )
    return int(worker )
def get_torch_dist_unique_port ():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 53 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode :
    def __init__(self , data ) -> None:
        self.data = data
        self.right = None
        self.left = None
def build_tree ( ):
    print("\n********Press N to stop entering at any point of time********\n" )
    check = input("Enter the value of the root node: " ).strip().lower()
    q = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F"Enter the left node of {node_found.data}: "
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F"Enter the right node of {node_found.data}: "
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end="," )
    pre_order(node.left )
    pre_order(node.right )
def in_order ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end="," )
    in_order(node.right )
def post_order ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end="," )
def level_order ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end="," )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end="," )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end="," )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def post_order_iter ( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end="," )
def prompt ( s = "" , width=50 , char="*" ):
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s ) - 2 , 2 )
    return F"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
    node = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 369 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool ( PipelineTool ):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup (self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
    def encode (self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
    def decode (self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
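# Usage sketch (illustrative; downloads the bart-large-mnli checkpoint on first
# run, and the exact call convention comes from PipelineTool.__call__):
#
#   tool = TextClassificationTool()
#   tool("This is a super nice API!", ["positive", "negative"])
#   -> "positive"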
| 244 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0_0
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def map ( dataset , **kwargs ):
    _ = dataset.map(**kwargs )
@get_duration
def filter ( dataset , **kwargs ):
    _ = dataset.filter(**kwargs )
def benchmark_map_filter ():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['map identity'] = map(dataset )
        times['map identity batched'] = map(dataset , batched=True )
        times['map no-op batched'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['map no-op batched numpy'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['map no-op batched pandas'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['map no-op batched pytorch'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['map no-op batched tensorflow'] = map(dataset , function=lambda x : None , batched=True )
        times['map fast-tokenizer batched'] = map(dataset , function=tokenize , batched=True )
        times['filter'] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 106 | """simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling ( TaskTemplate ):
    task : str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True})
    input_schema : ClassVar[Features] = Features({"text": Value("string")})
    label_schema : ClassVar[Features] = Features({})
    text_column : str = "text"
    @property
    def column_mapping ( self ) -> Dict[str, str]:
        return {self.text_column: "text"}
| 77 | 0 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search ( left : int , right : int , array : list[int] , target : int ) -> int:
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search ( array : list[int] , target : int ) -> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search ( left : int , right : int , array : list[int] , target : int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(F"""Iterative search: {target} found at positions: {result1}""")
        print(F"""Recursive search: {target} found at positions: {result2}""")
else:
print("""Not found""")
| 298 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig ( PretrainedConfig ):
    model_type = '''efficientformer'''
    def __init__( self , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , downsamples : List[bool] = [True, True, True, True] , dim : int = 4_4_8 , key_dim : int = 3_2 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 1_6 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1e-5 , hidden_act : str = "gelu" , initializer_range : float = 0.02 , layer_norm_eps : float = 1e-12 , image_size : int = 2_2_4 , batch_norm_eps : float = 1e-05 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
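# Usage sketch (illustrative): instantiate a config and override a couple of fields.
#
#   config = EfficientFormerConfig(image_size=192, drop_path_rate=0.1)
#   config.model_type  # -> "efficientformer"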
| 298 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = """google/mobilebert-uncased"""
    def setUp (self ):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts (self , tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer (self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers (self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = BasicTokenizer(do_lower_case=snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = BasicTokenizer(do_lower_case=snake_case , strip_accents=snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = BasicTokenizer(do_lower_case=snake_case , strip_accents=snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE__ ( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE__ ( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE__ ( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE__ ( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE__ ( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def SCREAMING_SNAKE_CASE__ ( self ):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def SCREAMING_SNAKE_CASE__ ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
        tokenizer = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def SCREAMING_SNAKE_CASE__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , 'do_lower_case' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def SCREAMING_SNAKE_CASE__ ( self ):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
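# Illustrative sketch (an assumption, not part of the original test file) of the
# greedy longest-match-first lookup that the WordpieceTokenizer assertions above
# rely on: consume the longest vocabulary piece, mark continuations with "##",
# and fall back to the unknown token when a word cannot be fully covered.
def wordpiece_sketch(word, vocab, unk_token='[UNK]'):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        cur_piece = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = '##' + piece
            if piece in vocab:
                cur_piece = piece
                break
            end -= 1
        if cur_piece is None:
            return [unk_token]  # e.g. 'unwantedX' in the test above maps to ['[UNK]']
        tokens.append(cur_piece)
        start = end
    return tokens


# wordpiece_sketch('unwanted', {'un', '##want', '##ed'}) -> ['un', '##want', '##ed']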
| 195 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase(unittest.TestCase ):
    '''simple docstring'''
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'tf' , 12 , **model_kwargs )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'pt' , 12 , **model_kwargs )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import BertModel
        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t' ) as vocab_file:
            vocab_file.write('\n'.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , 'pt' , 12 , tokenizer )
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'tf' , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'pt' , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework , model , path , opset , tokenizer , **model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'pt' )
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'tf' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
    def test_ensure_valid_input( self ):
        valid_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , valid_input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(valid_input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , valid_input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names ) , 1 )
        self.assertEqual(len(inputs_args ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['input_ids'] )
        self.assertEqual(ordered_input_names[0] , 'input_ids' )
    def test_generate_identified_filename( self ):
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
        self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
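# Minimal sketch (an assumption, not the library implementation) of the
# behaviour asserted by test_generate_identified_filename above: insert an
# identifier between the file stem and its extension.
def generate_identified_filename_sketch(path , identifier ):
    return path.parent.joinpath(path.stem + identifier ).with_suffix(path.suffix )


# generate_identified_filename_sketch(Path('/home/something/my_fake_model.onnx') , '-test' )
#   -> PosixPath('/home/something/my_fake_model-test.onnx')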
| 195 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
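# Optional cross-check (illustrative sketch, not part of the original file):
# the same count via Euclid's formula -- every primitive right triangle is
# (m*m - n*n, 2*m*n, m*m + n*n) for coprime m > n of opposite parity, with
# perimeter 2*m*(m + n); non-primitive triples are integer multiples of those.
def solution_euclid(max_perimeter: int = 1000) -> int:
    from math import gcd

    counts: typing.Counter[int] = Counter()
    m = 2
    while 2 * m * (m + 1) <= max_perimeter:  # smallest perimeter for this m uses n = 1
        for n in range(1, m):
            if (m - n) % 2 == 1 and gcd(m, n) == 1:
                primitive_perimeter = 2 * m * (m + n)
                for k in range(1, max_perimeter // primitive_perimeter + 1):
                    counts[k * primitive_perimeter] += 1
        m += 1
    return counts.most_common(1)[0][0]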
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 362 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    curr_string, result = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            # Pad the final chunk (or append a marker byte) so the bit length is unambiguous.
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
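# Illustrative helper (an assumption, not part of the original script): preview
# how a bit string is chunked and padded exactly as write_file_binary does,
# without touching the filesystem.
def preview_padding(to_write: str, byte_length: int = 8) -> list:
    chunks = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]
    if len(chunks[-1]) % byte_length == 0:
        chunks.append("10000000")
    else:
        chunks[-1] += "1" + "0" * (byte_length - len(chunks[-1]) - 1)
    return chunks


# e.g. preview_padding("1" * 12) -> ['11111111', '11111000']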
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 76 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : List[Any] = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(_UpperCAmelCase )
        mask_token = AddedToken("<mask>" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def SCREAMING_SNAKE_CASE ( self: Any ):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def SCREAMING_SNAKE_CASE ( self: Any ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
        self.assertEqual(len(vocab_keys ) , 81 )
def SCREAMING_SNAKE_CASE ( self: List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def SCREAMING_SNAKE_CASE ( self: Dict ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_a = tokenizer.vocab_size
                all_size_a = len(tokenizer )
                self.assertNotEqual(vocab_size_a , 0 )
                self.assertEqual(vocab_size , vocab_size_a )
                self.assertEqual(added_toks , len(new_toks ) )
                self.assertEqual(all_size_a , all_size + len(new_toks ) )
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                new_toks_a = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_a = tokenizer.add_special_tokens(new_toks_a )
                vocab_size_b = tokenizer.vocab_size
                all_size_b = len(tokenizer )
                self.assertNotEqual(vocab_size_b , 0 )
                self.assertEqual(vocab_size , vocab_size_b )
                self.assertEqual(added_toks_a , len(new_toks_a ) )
                self.assertEqual(all_size_b , all_size_a + len(new_toks_a ) )
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self: str ):
pass
def SCREAMING_SNAKE_CASE ( self: int ):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test" )
        # fmt: off
        self.assertListEqual(tokens , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        # fmt: off
        self.assertListEqual(ids , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def SCREAMING_SNAKE_CASE ( self: List[str] ):
# Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=sequences , )
| 236 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_UpperCAmelCase : Dict = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
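# Rough sketch (an assumption, not the transformers implementation) of the
# lazy-import pattern used above: exported names are only imported from their
# submodule the first time they are accessed.
import importlib


class LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self._name)
        return getattr(module, attr)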
| 236 | 1 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
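# Illustrative usage notes (not part of the original script). rreplace only
# rewrites the *last* occurrence, which is why the ".w"/".b" suffixes are safe
# to map even when a key contains other dots:
#   rreplace("blocks.w.conv.w", ".w", ".weight", 1) -> "blocks.w.conv.weight"
# Example CLI invocation (paths are assumptions):
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path ./encoder.pkl --pytorch_dump_folder_path ./flava-codebook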
| 127 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
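# Illustrative cross-check (an assumption, not part of the original file):
# count the same perimeters directly with Heron's formula on the
# almost-equilateral triangles (a, a, a - 1) and (a, a, a + 1).
def solution_brute_force(max_perimeter: int = 10**6) -> int:
    from math import isqrt

    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):
            perimeter = 2 * a + b
            if b <= 0 or perimeter > max_perimeter:
                continue
            t = 4 * a * a - b * b  # Heron: 16 * area**2 == b * b * t for an (a, a, b) triangle
            r = isqrt(t)
            if r * r == t and b * r > 0 and (b * r) % 4 == 0:
                total += perimeter
    return total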
if __name__ == "__main__":
print(f'''{solution() = }''')
| 127 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
"""simple docstring"""
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
def A ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Dict ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def A ( self : Optional[Any] ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def A ( self : int ):
"""simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : List[Any] ):
"""simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Optional[int] ):
"""simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def A ( self : Tuple ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self , scaling_type ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
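# Minimal sketch (an assumption, not the actual modeling code) of the "linear"
# RoPE scaling exercised by the parameterized test above: query/key positions
# are divided by a factor before the rotary angles are computed, so a model
# trained on short contexts can address longer ones.
def rope_angles_sketch(seq_len, dim, scaling_factor=1.0, base=10000.0):
    import torch  # local import to mirror the guarded import at the top of this file

    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float() / scaling_factor  # "linear" scaling step
    return torch.outer(positions, inv_freq)  # (seq_len, dim // 2) rotary angles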
| 28 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 512,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class A ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
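# Worked example (illustrative, stand-alone) of the segment-id logic above,
# with 101/102 as stand-ins for the CLS/SEP token ids:
def token_type_ids_sketch(token_ids_a, token_ids_b=None):
    cls, sep = [101], [102]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep) * [0]
    return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]


# token_type_ids_sketch([5, 6], [7]) -> [0, 0, 0, 0, 1, 1]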
| 164 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
    bleu_data = json.load(f)
@require_torch
class __a ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict , lowercase_ : Optional[int] ):
return FSMTTokenizer.from_pretrained(lowercase_ )
def _lowerCAmelCase ( self : Dict , lowercase_ : List[Any] ):
UpperCamelCase__ : Any =FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['''en-ru''', 2_6.0],
['''ru-en''', 2_2.0],
['''en-de''', 2_2.0],
['''de-en''', 2_9.0],
] )
@slow
def _lowerCAmelCase ( self : str , lowercase_ : List[str] , lowercase_ : Tuple ):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
UpperCamelCase__ : Any =f'''facebook/wmt19-{pair}'''
UpperCamelCase__ : int =self.get_tokenizer(lowercase_ )
UpperCamelCase__ : Any =self.get_model(lowercase_ )
UpperCamelCase__ : Union[str, Any] =bleu_data[pair]['''src''']
UpperCamelCase__ : Tuple =bleu_data[pair]['''tgt''']
UpperCamelCase__ : str =tokenizer(lowercase_ , return_tensors='''pt''' , truncation=lowercase_ , padding='''longest''' ).to(lowercase_ )
UpperCamelCase__ : List[Any] =model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCamelCase__ : Union[str, Any] =tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCamelCase__ : Optional[int] =calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['''bleu'''] , lowercase_ )
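# Sketch (an assumption about the imported helper, not its verbatim source) of
# calculate_bleu: a thin wrapper over sacrebleu's corpus-level BLEU.
def calculate_bleu_sketch(output_lns, refs_lns):
    import sacrebleu

    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}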
| 157 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __a ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'unispeech'
def __init__( self : List[Any] , lowercase_ : Tuple=32 , lowercase_ : int=768 , lowercase_ : List[Any]=12 , lowercase_ : Optional[int]=12 , lowercase_ : Union[str, Any]=3072 , lowercase_ : Any="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[str]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=0.0_2 , lowercase_ : int=1e-5 , lowercase_ : Dict="group" , lowercase_ : Optional[Any]="gelu" , lowercase_ : List[Any]=(512, 512, 512, 512, 512, 512, 512) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : Any=False , lowercase_ : Dict=128 , lowercase_ : List[str]=16 , lowercase_ : Any=False , lowercase_ : Optional[Any]=True , lowercase_ : List[str]=0.0_5 , lowercase_ : int=10 , lowercase_ : Optional[Any]=2 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=10 , lowercase_ : Union[str, Any]=0 , lowercase_ : Dict=320 , lowercase_ : Optional[Any]=2 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Dict=100 , lowercase_ : Optional[int]=256 , lowercase_ : Dict=256 , lowercase_ : Optional[int]=0.1 , lowercase_ : str="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Any=False , lowercase_ : Union[str, Any]=256 , lowercase_ : List[str]=80 , lowercase_ : Dict=0 , lowercase_ : int=1 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=0.5 , **lowercase_ : str , ):
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
UpperCamelCase__ : Dict =hidden_size
UpperCamelCase__ : Optional[int] =feat_extract_norm
UpperCamelCase__ : Dict =feat_extract_activation
UpperCamelCase__ : Union[str, Any] =list(lowercase_ )
UpperCamelCase__ : int =list(lowercase_ )
UpperCamelCase__ : Tuple =list(lowercase_ )
UpperCamelCase__ : List[str] =conv_bias
UpperCamelCase__ : Any =num_conv_pos_embeddings
UpperCamelCase__ : List[Any] =num_conv_pos_embedding_groups
UpperCamelCase__ : Optional[int] =len(self.conv_dim )
UpperCamelCase__ : Union[str, Any] =num_hidden_layers
UpperCamelCase__ : Optional[Any] =intermediate_size
UpperCamelCase__ : Any =hidden_act
UpperCamelCase__ : List[Any] =num_attention_heads
UpperCamelCase__ : List[Any] =hidden_dropout
UpperCamelCase__ : List[Any] =attention_dropout
UpperCamelCase__ : Tuple =activation_dropout
UpperCamelCase__ : Any =feat_proj_dropout
UpperCamelCase__ : Tuple =final_dropout
UpperCamelCase__ : Tuple =layerdrop
UpperCamelCase__ : int =layer_norm_eps
UpperCamelCase__ : Optional[int] =initializer_range
UpperCamelCase__ : Any =num_ctc_classes
UpperCamelCase__ : Optional[int] =vocab_size
UpperCamelCase__ : int =do_stable_layer_norm
UpperCamelCase__ : Union[str, Any] =use_weighted_layer_sum
UpperCamelCase__ : Tuple =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ : List[Any] =apply_spec_augment
UpperCamelCase__ : List[Any] =mask_time_prob
UpperCamelCase__ : Optional[int] =mask_time_length
UpperCamelCase__ : Dict =mask_time_min_masks
UpperCamelCase__ : str =mask_feature_prob
UpperCamelCase__ : Union[str, Any] =mask_feature_length
UpperCamelCase__ : int =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase__ : Optional[Any] =num_codevectors_per_group
UpperCamelCase__ : Dict =num_codevector_groups
UpperCamelCase__ : int =contrastive_logits_temperature
UpperCamelCase__ : Tuple =feat_quantizer_dropout
UpperCamelCase__ : List[str] =num_negatives
UpperCamelCase__ : Dict =codevector_dim
UpperCamelCase__ : Any =proj_codevector_dim
UpperCamelCase__ : List[Any] =diversity_loss_weight
# ctc loss
UpperCamelCase__ : Tuple =ctc_loss_reduction
UpperCamelCase__ : List[str] =ctc_zero_infinity
# pretraining loss
UpperCamelCase__ : Optional[Any] =replace_prob
@property
def _lowerCAmelCase ( self : List[str] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
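

if __name__ == "__main__":
    # Illustrative usage sketch (an addition, not part of the original module): with the
    # defaults above, the feature encoder has 7 conv layers and the inputs-to-logits
    # ratio is the product of the conv strides, 5 * 2**6 = 320.
    config = UniSpeechConfig()
    print(config.num_feat_extract_layers)  # 7
    print(config.inputs_to_logits_ratio)  # 320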
| 157 | 1 |
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
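

if __name__ == "__main__":
    # Quick sanity check (an illustrative addition): three points along the x-axis are
    # collinear; lifting the third point off the line breaks collinearity.
    print(are_collinear((0, 0, 0), (1, 0, 0), (2, 0, 0)))  # True
    print(are_collinear((0, 0, 0), (1, 0, 0), (1, 1, 0)))  # False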
| 239 | '''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowercase : List[str] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowercase : Tuple = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_lowercase : Optional[int] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 239 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
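
# Note (added for clarity): with the `_LazyModule` pattern above, heavy submodules are
# imported only on first attribute access, e.g. `from ... import BigBirdPegasusConfig`
# triggers the import of `configuration_bigbird_pegasus` at that point, while static
# type checkers still see the eager imports via the `TYPE_CHECKING` branch.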
| 359 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> List[str]:
# Recurse if needed
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : List[Any] = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : Dict = getattr(__lowerCAmelCase , __lowerCAmelCase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : Any = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
SCREAMING_SNAKE_CASE__ : List[str] = tensor_name in module._buffers
SCREAMING_SNAKE_CASE__ : Dict = getattr(__lowerCAmelCase , __lowerCAmelCase )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = False
if is_buffer or not is_bitsandbytes_available():
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : List[Any] = False
else:
SCREAMING_SNAKE_CASE__ : str = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
SCREAMING_SNAKE_CASE__ : str = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
SCREAMING_SNAKE_CASE__ : Dict = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE__ : Tuple = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : int = value.to("""cpu""" )
if value.dtype == torch.inta:
SCREAMING_SNAKE_CASE__ : str = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(__lowerCAmelCase , device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __lowerCAmelCase ) and fpaa_statistics is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = new_value.T
SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_value.__dict__
if is_abit:
SCREAMING_SNAKE_CASE__ : str = bnb.nn.IntaParams(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase )
elif is_abit:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bnb.nn.Paramsabit(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_value
if fpaa_statistics is not None:
setattr(module.weight , """SCB""" , fpaa_statistics.to(__lowerCAmelCase ) )
else:
if value is None:
SCREAMING_SNAKE_CASE__ : str = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : List[str] = value.to(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(__lowerCAmelCase , device=__lowerCAmelCase )
if is_buffer:
SCREAMING_SNAKE_CASE__ : List[str] = new_value
else:
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(__lowerCAmelCase , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE__ : Dict = new_value
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False ) -> List[Any]:
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
current_key_name.append(__lowerCAmelCase )
if (isinstance(__lowerCAmelCase , nn.Linear ) or isinstance(__lowerCAmelCase , __lowerCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(__lowerCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = module.weight.shape
else:
SCREAMING_SNAKE_CASE__ : str = module.in_features
SCREAMING_SNAKE_CASE__ : Dict = module.out_features
if quantization_config.quantization_method() == "llm_int8":
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.LinearabitLt(
__lowerCAmelCase , __lowerCAmelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
SCREAMING_SNAKE_CASE__ : Tuple = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = bnb.nn.Linearabit(
__lowerCAmelCase , __lowerCAmelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
SCREAMING_SNAKE_CASE__ : int = True
# Store the module class in case we need to transpose the weight later
SCREAMING_SNAKE_CASE__ : Dict = type(__lowerCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowerCAmelCase )
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = _replace_with_bnb_linear(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_been_replaced=__lowerCAmelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> str:
SCREAMING_SNAKE_CASE__ : int = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = _replace_with_bnb_linear(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , __lowerCAmelCase , )
return replace_with_bnb_linear(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , __lowerCAmelCase , )
return set_module_quantized_tensor_to_device(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = deepcopy(__lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
SCREAMING_SNAKE_CASE__ : List[str] = find_tied_parameters(__lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = sum(__lowerCAmelCase , [] )
SCREAMING_SNAKE_CASE__ : str = len(__lowerCAmelCase ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = not hasattr(__lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : int = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : str = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : Any = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = list(set(__lowerCAmelCase ) ) + list(__lowerCAmelCase )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Any = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace(__lowerCAmelCase , """""" )
filtered_module_names.append(__lowerCAmelCase )
return filtered_module_names
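

# Illustrative end-to-end sketch (an addition; the `AutoModelForCausalLM` usage and the
# model name below are assumptions for illustration, not part of this module):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
#   modules_to_not_convert = get_keys_to_not_convert(model)
#   model = replace_with_bnb_linear(
#       model, modules_to_not_convert=modules_to_not_convert, quantization_config=quantization_config
#   )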
| 56 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def a ( __a="" ) -> str:
'''simple docstring'''
UpperCamelCase__ :Dict = tempfile.mkdtemp()
return os.path.join(__a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCamelCase__ :str = AgentAudio(UpperCamelCase_ )
UpperCamelCase__ :Tuple = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(UpperCamelCase_ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = sf.read(UpperCamelCase_ )
self.assertTrue(torch.allclose(UpperCamelCase_ , torch.tensor(UpperCamelCase_ ) , atol=1e-4 ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCamelCase__ :Optional[Any] = get_new_path(suffix='''.wav''' )
sf.write(UpperCamelCase_ , UpperCamelCase_ , 16000 )
UpperCamelCase__ :List[Any] = AgentAudio(UpperCamelCase_ )
self.assertTrue(torch.allclose(UpperCamelCase_ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , UpperCamelCase_ )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = torch.randint(0 , 256 , (64, 64, 3) )
UpperCamelCase__ :List[str] = AgentImage(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(UpperCamelCase_ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCamelCase__ :str = Image.open(UpperCamelCase_ )
UpperCamelCase__ :int = AgentImage(UpperCamelCase_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCamelCase__ :List[Any] = Image.open(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = AgentImage(UpperCamelCase_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = '''Hey!'''
UpperCamelCase__ :str = AgentText(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , agent_type.to_string() )
self.assertEqual(UpperCamelCase_ , agent_type.to_raw() )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) | 97 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a ( a_ ):
UpperCAmelCase_ : List[Any] =["image_processor", "tokenizer"]
UpperCAmelCase_ : str ="AutoImageProcessor"
UpperCAmelCase_ : Any ="AutoTokenizer"
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCamelCase , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_lowerCamelCase , _lowerCamelCase )
lowercase = self.image_processor
lowercase = False
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
lowercase = kwargs.pop('images' , _lowerCamelCase )
lowercase = kwargs.pop('text' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowercase = args[0]
lowercase = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowercase = self.image_processor(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if text is not None:
lowercase = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def UpperCamelCase_ ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
lowercase = True
lowercase = self.tokenizer
yield
lowercase = self.image_processor
lowercase = False
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=None ):
if added_vocab is None:
lowercase = self.tokenizer.get_added_vocab()
lowercase = {}
while tokens:
lowercase = re.search(R'<s_(.*?)>' , _lowerCamelCase , re.IGNORECASE )
if start_token is None:
break
lowercase = start_token.group(1 )
lowercase = re.search(RF'</s_{key}>' , _lowerCamelCase , re.IGNORECASE )
lowercase = start_token.group()
if end_token is None:
lowercase = tokens.replace(_lowerCamelCase , '' )
else:
lowercase = end_token.group()
lowercase = re.escape(_lowerCamelCase )
lowercase = re.escape(_lowerCamelCase )
lowercase = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , _lowerCamelCase , re.IGNORECASE )
if content is not None:
lowercase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowercase = self.tokenajson(_lowerCamelCase , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if value:
if len(_lowerCamelCase ) == 1:
lowercase = value[0]
lowercase = value
else: # leaf nodes
lowercase = []
for leaf in content.split(R'<sep/>' ):
lowercase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowercase = leaf[1:-2] # for categorical special tokens
output[key].append(_lowerCamelCase )
if len(output[key] ) == 1:
lowercase = output[key][0]
lowercase = tokens[tokens.find(_lowerCamelCase ) + len(_lowerCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if len(_lowerCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCamelCase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCamelCase , )
return self.image_processor
| 220 | 0 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
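

if __name__ == "__main__":
    # Illustrative usage sketch (an addition): the path below is a placeholder for a real
    # exported ONNX file; duplicated initializers are removed and the optimized model is
    # written next to the original.
    print(remove_dup_initializers("model.onnx"))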
| 102 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__ ( unittest.TestCase ):
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.dummy_uncond_unet
__lowerCAmelCase = ScoreSdeVeScheduler()
__lowerCAmelCase = ScoreSdeVePipeline(unet=_A , scheduler=_A )
sde_ve.to(_A )
sde_ve.set_progress_bar_config(disable=_A )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=_A ).images
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=_A , return_dict=_A )[
0
]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "google/ncsnpp-church-256"
__lowerCAmelCase = UNetaDModel.from_pretrained(_A )
__lowerCAmelCase = ScoreSdeVeScheduler.from_pretrained(_A )
__lowerCAmelCase = ScoreSdeVePipeline(unet=_A , scheduler=_A )
sde_ve.to(_A )
sde_ve.set_progress_bar_config(disable=_A )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=1_0 , output_type="numpy" , generator=_A ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
__lowerCAmelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 102 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase_ = 'hf-internal-testing/tiny-random-bert'
lowercase_ = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
lowercase_ = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A = cached_file(_lowerCamelCase, _lowerCamelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_lowerCamelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_lowerCamelCase, _lowerCamelCase ) ) )
with open(os.path.join(_lowerCamelCase, '''refs''', '''main''' ) ) as f:
__A = f.read()
self.assertEqual(_lowerCamelCase, os.path.join(_lowerCamelCase, '''snapshots''', _lowerCamelCase, _lowerCamelCase ) )
self.assertTrue(os.path.isfile(_lowerCamelCase ) )
# File is cached at the same place the second time.
__A = cached_file(_lowerCamelCase, _lowerCamelCase )
self.assertEqual(_lowerCamelCase, _lowerCamelCase )
# Using a specific revision to test the full commit hash.
__A = cached_file(_lowerCamelCase, _lowerCamelCase, revision='''9b8c223''' )
self.assertEqual(_lowerCamelCase, os.path.join(_lowerCamelCase, '''snapshots''', _lowerCamelCase, _lowerCamelCase ) )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
with self.assertRaisesRegex(_lowerCamelCase, '''is not a valid model identifier''' ):
__A = cached_file('''tiny-random-bert''', _lowerCamelCase )
with self.assertRaisesRegex(_lowerCamelCase, '''is not a valid git identifier''' ):
__A = cached_file(_lowerCamelCase, _lowerCamelCase, revision='''aaaa''' )
with self.assertRaisesRegex(_lowerCamelCase, '''does not appear to have a file named''' ):
__A = cached_file(_lowerCamelCase, '''conf''' )
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
with self.assertRaisesRegex(_lowerCamelCase, '''does not appear to have a file named''' ):
__A = cached_file(_lowerCamelCase, '''conf''' )
with open(os.path.join(_lowerCamelCase, '''refs''', '''main''' ) ) as f:
__A = f.read()
self.assertTrue(os.path.isfile(os.path.join(_lowerCamelCase, '''.no_exist''', _lowerCamelCase, '''conf''' ) ) )
__A = cached_file(_lowerCamelCase, '''conf''', _raise_exceptions_for_missing_entries=_lowerCamelCase )
self.assertIsNone(_lowerCamelCase )
__A = cached_file(_lowerCamelCase, '''conf''', local_files_only=_lowerCamelCase, _raise_exceptions_for_missing_entries=_lowerCamelCase )
self.assertIsNone(_lowerCamelCase )
__A = mock.Mock()
__A = 5_00
__A = {}
__A = HTTPError
__A = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''', return_value=_lowerCamelCase ) as mock_head:
__A = cached_file(_lowerCamelCase, '''conf''', _raise_exceptions_for_connection_errors=_lowerCamelCase )
self.assertIsNone(_lowerCamelCase )
# This check we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''', _lowerCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', _lowerCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', _lowerCamelCase ) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''', '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_lowerCamelCase, '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''', _lowerCamelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_lowerCamelCase, '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''', _lowerCamelCase, revision='''ahaha''' )
__A = get_file_from_repo('''bert-base-cased''', _lowerCamelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
__A = json.loads(open(_lowerCamelCase, '''r''' ).read() )
self.assertEqual(config['''hidden_size'''], 7_68 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A = Path(_lowerCamelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(_lowerCamelCase, '''a.txt''' ), str(_lowerCamelCase ) )
self.assertIsNone(get_file_from_repo(_lowerCamelCase, '''b.txt''' ) )
| 266 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ):
__A = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sgugger/tiny-distilbert-classification'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
# set architectures equal to `None`
__A = None
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = '''sshleifer/tinier_bart'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = '''sshleifer/tinier_bart'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ):
self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
| 266 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Any=10 , __lowerCamelCase : Any=18 , __lowerCamelCase : Any=30 , __lowerCamelCase : Union[str, Any]=4_00 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : str=True , __lowerCamelCase : Any=[0.5, 0.5, 0.5] , __lowerCamelCase : int=[0.5, 0.5, 0.5] , __lowerCamelCase : List[Any]=None , ) -> Tuple:
A : List[Any] = size if size is not None else {"shortest_edge": 18}
A : List[str] = crop_size if crop_size is not None else {"height": 18, "width": 18}
A : Optional[Any] = parent
A : int = batch_size
A : str = num_channels
A : Optional[Any] = num_frames
A : List[str] = image_size
A : str = min_resolution
A : str = max_resolution
A : str = do_resize
A : Tuple = size
A : str = do_normalize
A : Optional[int] = image_mean
A : Optional[int] = image_std
A : int = crop_size
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = VivitImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
A : Tuple = VivitImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , "image_mean" ) )
self.assertTrue(hasattr(_snake_case , "image_std" ) )
self.assertTrue(hasattr(_snake_case , "do_normalize" ) )
self.assertTrue(hasattr(_snake_case , "do_resize" ) )
self.assertTrue(hasattr(_snake_case , "do_center_crop" ) )
self.assertTrue(hasattr(_snake_case , "size" ) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
A : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
A : int = prepare_video_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for video in video_inputs:
self.assertIsInstance(_snake_case , _snake_case )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
A : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A : str = image_processing(_snake_case , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
A : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for video in video_inputs:
self.assertIsInstance(_snake_case , _snake_case )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
A : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A : Dict = image_processing(_snake_case , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for video in video_inputs:
self.assertIsInstance(_snake_case , _snake_case )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
A : List[str] = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A : Any = image_processing(_snake_case , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , ) | 361 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter for the flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
    doctest.testmod()
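    # Minimal usage sketch: approximate ellipsoidal distance in metres between
    # two illustrative coordinate pairs (the values are assumptions, not
    # verified reference data).
    print(UpperCAmelCase(37.774856, -122.424227, 37.864742, -119.537521))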
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCAmelCase_ (PretrainedConfig ):
"""simple docstring"""
    model_type = '''vivit'''
    def __init__(self , image_size=2_24 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , qkv_bias=True , **kwargs , ) -> None:
        """simple docstring"""
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
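if __name__ == "__main__":
    # Minimal sketch: build the config with the defaults above (assumed to
    # mirror google/vivit-b-16x2-kinetics400) and inspect a few fields. The
    # module uses relative imports, so run it in package context.
    config = lowerCAmelCase_()
    print(config.hidden_size, config.num_frames, config.tubelet_size)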
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''embed_dim''' ) )
        self.parent.assertTrue(hasattr(config , '''num_heads''' ) )
class TFCvtModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.0_2 , layer_norm_eps=1e-12 , is_training=True , use_labels=True , num_labels=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TFCvtModel(config=config )
        result = model(pixel_values , training=False )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFCvtModelTester(self )
        self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
    def test_attention_outputs( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
    def test_dataset_conversion( self ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
    def test_keras_fit( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
    def test_keras_fit_mixed_precision( self ):
        '''simple docstring'''
        policy = tf.keras.mixed_precision.Policy('''mixed_float16''' )
        tf.keras.mixed_precision.set_global_policy(policy )
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('''float32''' )
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __lowerCamelCase ( PretrainedConfig ):
"""simple docstring"""
snake_case__ = "time_series_transformer"
snake_case__ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7] , scaling: Optional[Union[str, bool]] = "mean" , num_dynamic_real_features: int = 0 , num_static_categorical_features: int = 0 , num_static_real_features: int = 0 , num_time_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , is_encoder_decoder: bool = True , activation_function: str = "gelu" , d_model: int = 64 , dropout: float = 0.1 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache=True , **kwargs , ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
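if __name__ == "__main__":
    # Minimal sketch (illustrative hyperparameters; run in package context due
    # to relative imports): `feature_size` is input_size * len(lags_sequence)
    # plus the static/time feature count from `_number_of_features`.
    config = __lowerCamelCase(prediction_length=24, context_length=48, num_time_features=2)
    print(config.feature_size, config._number_of_features)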
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __lowerCamelCase ( ProcessorMixin ):
"""simple docstring"""
snake_case__ = "Speech2TextFeatureExtractor"
snake_case__ = "Speech2TextTokenizer"
    def __init__( self , feature_extractor , tokenizer ) -> None:
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.)" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
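# Minimal usage sketch, kept as comments because it needs pretrained assets.
# The checkpoint id is illustrative and `waveform` stands for a 1-D float
# array sampled at 16 kHz:
#
#   processor = __lowerCamelCase.from_pretrained("facebook/s2t-small-librispeech-asr")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
#   # `batch` carries `input_features` from the feature extractor and
#   # `labels` from the tokenizer.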
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__UpperCAmelCase = TypeVar('T')
class SegmentTree (Generic[T] ):
'''simple docstring'''
    def __init__( self , arr: list[T] , fnc: Callable[[T, T], T] ) -> None:
        any_type: Any | T = None
        self.N: int = len(arr )
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ) -> None:
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p: int , v: T ) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l: int , r: int ) -> T | None:  # noqa: E741
        l , r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
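    # A few explicit spot checks after the updates above (a minimal usage
    # sketch; indices are 0-based and inclusive on both ends).
    print(min_segment_tree.query(0, 5))
    print(max_segment_tree.query(3, 9))
    print(sum_segment_tree.query(2, 7))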
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_( state_dict ) -> None:
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk( checkpoint_path ) -> MaMaaaForConditionalGeneration:
    mam_aaa = torch.load(checkpoint_path , map_location='cpu' )
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
import os
def largest_product( grid ):
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    diag_product = 0
    anti_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                anti_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , diag_product , anti_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution( ):
    grid = []
    with open(os.path.dirname(__file__ ) + '/grid.txt' ) as file:
        for line in file:
            grid.append(line.strip('\n' ).split(' ' ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
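    # Minimal sketch on a 4x4 grid of illustrative values: the best run of
    # four adjacent numbers is the bottom row, 13 * 14 * 15 * 16 = 43680.
    example_grid = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    print(largest_product(example_grid))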
"""simple docstring"""
from math import factorial, radians
def sin( angle_in_degrees: float , accuracy: int = 18 , rounded_values_count: int = 10 ) -> float:
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("""doctest""").testmod()
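    # Minimal usage sketch: with the default 18 series terms, sin(30 degrees)
    # comes out as 0.5 after rounding.
    print(sin(30))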
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__version__ = '''3.0.12'''
_logger = None
def logger():
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout ( TimeoutError ):
    '''simple docstring'''
    def __init__( self , lock_file ):
        self.lock_file = lock_file
        return None
    def __str__( self ):
        temp = F'''The file lock '{self.lock_file}' could not be acquired.'''
        return temp
class _Acquire_ReturnProxy :
    '''simple docstring'''
    def __init__( self , lock ):
        self.lock = lock
        return None
def __enter__( self: List[str] ) -> Union[str, Any]:
return self.lock
def __exit__( self: int ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Union[str, Any]:
self.lock.release()
return None
class BaseFileLock :
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self ):
        return self._lock_file
    @property
    def timeout( self ):
        return self._timeout
    @timeout.setter
    def timeout( self , value ):
        self._timeout = float(value )
        return None
    def _acquire( self ):
        raise NotImplementedError()
    def _release( self ):
        raise NotImplementedError()
    @property
    def is_locked( self ):
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.05 ):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
                        self._acquire()
                    if self.is_locked:
                        logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
                        break
                    elif timeout >= 0 and time.time() - start_time > timeout:
                        logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
                        raise Timeout(self._lock_file )
                    else:
                        logger().debug(
                            F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
                        time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release( self , force=False ):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
        return None
def __enter__( self: str ) -> List[str]:
self.acquire()
return self
def __exit__( self: Union[str, Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: List[Any] ) -> Optional[int]:
self.release()
return None
def __del__( self: Optional[Any] ) -> Optional[Any]:
        self.release(force=True )
return None
    def hash_filename_if_too_long( self , path , max_length ):
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + """...""" + hashed_filename + """.lock"""
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock ( BaseFileLock ):
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release( self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock ( BaseFileLock ):
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
    def _release( self ):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
return None
class SoftFileLock ( BaseFileLock ):
    '''simple docstring'''
    def _acquire( self ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
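if __name__ == "__main__":
    # Minimal usage sketch: `FileLock` resolves to the platform class chosen
    # above; the context manager acquires on enter and releases on exit. The
    # lock-file name and timeout are arbitrary example values.
    with FileLock("demo.txt.lock", timeout=5):
        print("lock acquired; critical section runs here")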
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
'''simple docstring'''
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1E-2
    @property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
    def input_shape( self ):
return (3, 32, 32)
@property
    def output_shape( self ):
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def A__ ( self: Optional[Any] ) -> int:
pass
def A__ ( self: str ) -> Any:
pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
def A__ ( self: Union[str, Any] ) -> Dict:
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
UpperCAmelCase_ : Dict = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
UpperCAmelCase_ : Dict = dict(model.named_parameters() )
UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
    def test_from_pretrained_hub( self ):
        model , loading_info = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self: Optional[int] ) -> int:
UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : str = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
UpperCAmelCase_ : int = image.to(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format( self , seed , shape ):
        return F'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def A__ ( self: Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ):
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
    def get_sd_vae_model( self , model_id="CompVis/stable-diffusion-v1-4" , fpaa=False ):
        revision = """fp16""" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id ,subfolder="""vae""" ,torch_dtype=torch_dtype ,revision=revision ,)
        model.to(torch_device ).eval()
        return model
    def get_generator( self , seed=0 ):
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple:
UpperCAmelCase_ : List[Any] = self.get_sd_vae_model()
UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model()
UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.get_sd_vae_model()
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist
UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any =logging.get_logger(__name__)
__lowerCAmelCase : str ={
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class _A ( PretrainedConfig ):
snake_case__ : int = "lxmert"
snake_case__ : int = {}
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.6_7 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
        super().__init__(**kwargs )
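if __name__ == "__main__":
    # Minimal sketch (run in package context due to relative imports): the
    # defaults above are assumed to mirror unc-nlp/lxmert-base-uncased;
    # `num_hidden_layers` is a per-modality dict.
    config = _A()
    print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}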
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
    if not isinstance(lowerCAmelCase__ , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_string = str(abs(lowerCAmelCase__ ) )
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
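    # Minimal sketch: dropping one digit of 123 gives 23, 13 and 12, so the
    # largest single-digit removal is 23.
    print(UpperCAmelCase__(123))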
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary :
    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        """simple docstring"""
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ):
"""simple docstring"""
return self.indices == other.indices
    def __getitem__( self , idx ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
"""simple docstring"""
return len(self.symbols )
    def __contains__( self , sym ):
"""simple docstring"""
return sym in self.indices
    @classmethod
    def load( cls , f ):
        """simple docstring"""
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        """simple docstring"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ):
"""simple docstring"""
return 0
    def add_from_file( self , f ):
        """simple docstring"""
        if isinstance(f , str ):
            try:
                with open(f , """r""" , encoding="""utf-8""" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line , field = line.rstrip().rsplit(""" """ , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line , field = line.rsplit(""" """ , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        """Duplicate word found when loading Dictionary: '{}'. """
                        """Duplicate words can overwrite earlier ones by adding the """
                        """#fairseq:overwrite flag at the end of the corresponding row """
                        """in the dictionary file. If using the Camembert model, please """
                        """download an updated copy of the model file.""".format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def rewrite_dict_keys( d ):
    """simple docstring"""
    da = dict((re.sub(R"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , k ), v) for k, v in d.items() )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'{k}</w>']
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(F'Writing results to {pytorch_dump_folder_path}' )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , """checkpoint.pt""" )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
    chkpt = torch.load(checkpoint_file , map_location="""cpu""" )
    args = chkpt["""cfg"""]["""model"""]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , """dict.txt""" )
    if not os.path.isfile(dict_file ):
        raise ValueError(F'path to the file {dict_file} does not exist!' )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["""vocab_file"""] )
    print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
    with open(src_vocab_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , """bpecodes""" )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["""merges_file"""] )
    shutil.copyfile(bpecodes_file , merges_file )
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , """config.json""" )
_lowerCAmelCase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1E-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(_a , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a ) )
# tokenizer config
_lowerCAmelCase = os.path.join(_a , _a )
_lowerCAmelCase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 10_24,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(_a , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a ) )
# model
_lowerCAmelCase = chkpt["""model"""]
# remove unneeded keys
_lowerCAmelCase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(_a , _a )
_lowerCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
_lowerCAmelCase = model_state_dict.pop(_a )
else:
_lowerCAmelCase = model_state_dict.pop(_a )
_lowerCAmelCase = BioGptConfig.from_pretrained(_a )
_lowerCAmelCase = BioGptForCausalLM(_a )
# check that it loads ok
model_new.load_state_dict(_a )
# save
_lowerCAmelCase = os.path.join(_a , _a )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(_a , _a )
print("""Conversion is done!""" )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
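# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the vocab rewrite
# above converts fairseq's continuation-marker convention ("tok@@" = token is
# continued) into the end-of-word convention ("tok</w>") that the BioGPT
# tokenizer expects. A minimal standalone version of that mapping, assuming
# plain string keys (hypothetical helper name):
def rewrite_bpe_keys_sketch(vocab):
    out = {}
    for token, idx in vocab.items():
        if token.endswith("@@"):
            out[token[:-2]] = idx  # drop the continuation marker
        else:
            out[token + "</w>"] = idx  # mark an explicit end of word
    for special in "<s> <pad> </s> <unk>".split():  # special tokens stay verbatim
        if special + "</w>" in out:
            del out[special + "</w>"]
            out[special] = vocab[special]
    return out

# rewrite_bpe_keys_sketch({"hel@@": 4, "lo": 5, "<s>": 0})
#   -> {"hel": 4, "lo</w>": 5, "<s>": 0}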
| 82 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args ):
    """Pair leftover ``--flag value`` tokens into a kwargs dict."""
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}


def main():
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )

    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )

    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
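# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): parse_unknown_args
# above pairs leftover CLI tokens positionally with a strided zip. The idiom
# in isolation (hypothetical name):
def pair_flags_sketch(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}

# pair_flags_sketch(["--name", "squad", "--split", "train"])
#   -> {"name": "squad", "split": "train"}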
| 34 | 0 |
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
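# ---------------------------------------------------------------------------
# Illustrative check (not part of the original script): every rotation above
# is composed from two primitives, transpose and row/column reversal. For the
# 2x2 matrix [[1, 2], [3, 4]], transposing gives [[1, 3], [2, 4]] and then
# reversing the row order yields the 90-degree counterclockwise rotation:
assert rotate_90(make_matrix(2)) == [[2, 4], [1, 3]]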
| 255 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok , src_examples , tgt_examples , max_tokens=1024 ):
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]

    def is_too_big(strang ):
        return tok(strang , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + """ """ + src
        cand_tgt = new_tgt + """ """ + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt


def pack_data_dir(tok , data_dir , max_tokens , save_path ):
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(f"packed {split} split from {len(src_docs )} examples -> {len(packed_src )}." )
        Path(save_path / f"{split}.source" ).open("""w""" ).write("""\n""".join(packed_src ) )
        Path(save_path / f"{split}.target" ).open("""w""" ).write("""\n""".join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path , save_path / f"{split}.source" )
        shutil.copyfile(tgt_path , save_path / f"{split}.target" )


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("""--tok_name""" , type=str , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
    parser.add_argument("""--max_seq_len""" , type=int , default=128 )
    parser.add_argument("""--data_dir""" , type=str )
    parser.add_argument("""--save_path""" , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
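# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): pack_examples greedily
# concatenates consecutive (src, tgt) pairs until the tokenized candidate would
# exceed max_tokens, then flushes. A tokenizer-free toy version that measures
# length in words instead of tokens (hypothetical helper name):
def pack_by_words_sketch(pairs, max_words=4):
    finished = []
    cur_src, cur_tgt = pairs[0]
    for src, tgt in pairs[1:]:
        cand_src, cand_tgt = cur_src + " " + src, cur_tgt + " " + tgt
        if len(cand_src.split()) > max_words or len(cand_tgt.split()) > max_words:
            finished.append((cur_src, cur_tgt))  # candidate too big: flush current pack
            cur_src, cur_tgt = src, tgt
        else:
            cur_src, cur_tgt = cand_src, cand_tgt  # still fits: keep packing
    finished.append((cur_src, cur_tgt))
    return finished

# pack_by_words_sketch([("a b", "x"), ("c", "y"), ("d e f", "z")])
#   -> [("a b c", "x y"), ("d e f", "z")]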
| 255 | 1 |
from __future__ import annotations
class Node:
    """A single node of a singly linked list."""

    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(F'''{temp.data}''')
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from elements_list and return its head node."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the list in reverse order by recursing to the tail first."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
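# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): print_reverse recurses
# to the tail before printing, so each node is emitted on the way back up the
# call stack. An equivalent iterative version that avoids hitting the recursion
# limit on very long lists (hypothetical helper name):
def print_reverse_iterative_sketch(head: Node) -> None:
    items = []
    while head is not None:
        items.append(head.data)
        head = head.next
    for data in reversed(items):
        print(data)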
| 227 |
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers by simulating beads falling under gravity."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
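# ---------------------------------------------------------------------------
# Illustrative note (not part of the original script): each outer pass lets the
# excess of every taller rod spill onto its right neighbour, so a value moves
# at most one position per pass and len(sequence) passes always suffice -- an
# O(n^2) exchange sort valid only for non-negative integers.
# Step-by-step on [3, 1, 2]:
#   pass 1: (3, 1) -> (1, 3), then (3, 2) -> (2, 3)   => [1, 2, 3]
#   pass 2: nothing left to move                      => [1, 2, 3]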
| 227 | 1 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes <= num via the sieve of Eratosthenes."""
    if num <= 0:
        message = f"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
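# ---------------------------------------------------------------------------
# Illustrative note (not part of the original script): crossing out starts at
# start * start because any smaller multiple of start has a prime factor below
# start and was already removed in an earlier pass. Expected output:
# prime_sieve(25) -> [2, 3, 5, 7, 11, 13, 17, 19, 23]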
| 231 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path , config_name , flax_dump_folder_path ):
    """Convert a T5X (JAX) checkpoint into a Flax transformers model and save it."""
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
A__ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
A__ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
A__ = f"""layers_{str(lowercase_ )}"""
# Self-Attention
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
A__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
A__ = flax_model.params['''encoder''']['''block'''][str(lowercase_ )]['''layer''']
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_global_layer_norm
if split_mlp_wi:
A__ = tax_mlp_wi_a
A__ = tax_mlp_wi_a
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
A__ = tax_mlp_layer_norm
A__ = flax_model_encoder_layer_block
# Only for layer 0:
A__ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
A__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
A__ = tax_encoder_global_rel_embedding
# Assigning
A__ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
A__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
A__ = f"""layers_{str(lowercase_ )}"""
# Self-Attention
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
A__ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
A__ = tax_enc_dec_attention_module['''key''']['''kernel''']
A__ = tax_enc_dec_attention_module['''out''']['''kernel''']
A__ = tax_enc_dec_attention_module['''query''']['''kernel''']
A__ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
A__ = flax_model.params['''decoder''']['''block'''][str(lowercase_ )]['''layer''']
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_pre_attention_layer_norm
A__ = tax_enc_dec_attention_key
A__ = tax_enc_dec_attention_out
A__ = tax_enc_dec_attention_query
A__ = tax_enc_dec_attention_value
A__ = tax_cross_layer_norm
if split_mlp_wi:
A__ = tax_mlp_wi_a
A__ = tax_mlp_wi_a
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
A__ = txa_mlp_layer_norm
A__ = flax_model_decoder_layer_block
# Decoder Normalization
A__ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
A__ = txa_decoder_norm
# Only for layer 0:
A__ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
A__ = tax_decoder_rel_embedding
# Token Embeddings
A__ = tax_model['''target''']['''token_embedder''']['''embedding''']
A__ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
A__ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(flax_dump_folder_path )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
_lowerCamelCase : Tuple = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
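# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the converter above is
# one long explicit remap between two nested parameter trees. The recurring
# move -- read a leaf at one path, write it at another -- can be expressed
# generically (hypothetical helper; the example paths below are assumptions,
# not the exact transformers layout):
def copy_leaf_sketch(src_tree, src_path, dst_tree, dst_path):
    node = src_tree
    for key in src_path[:-1]:
        node = node[key]
    value = node[src_path[-1]]  # leaf array to move
    target = dst_tree
    for key in dst_path[:-1]:
        target = target.setdefault(key, {})
    target[dst_path[-1]] = value

# copy_leaf_sketch(t5x_params, ("target", "encoder", "encoder_norm", "scale"),
#                  flax_params, ("encoder", "final_layer_norm", "weight"))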
| 231 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCAmelCase):
def __init__(self , *UpperCAmelCase , **UpperCAmelCase ) -> None:
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ImageGPTImageProcessor instead.''' , FutureWarning , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
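# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the shim above
# deprecates a class by subclassing its replacement and warning on
# construction. The same pattern in isolation (hypothetical class names):
import warnings


class NewProcessor:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs


class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)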
| 5 | '''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__a: Optional[Any] = 16
__a: Any = 32
def get_dataloaders( accelerator , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCAmelCase , max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : List[Any] = datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : str = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : Dict = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
UpperCAmelCase , padding='''longest''' , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : str = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
lowercase__ : Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__a: Tuple = mocked_dataloaders # noqa: F811
def training_function( config , args ):
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowercase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
lowercase__ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : int = config['''lr''']
lowercase__ : Optional[int] = int(config['''num_epochs'''] )
lowercase__ : Optional[Any] = int(config['''seed'''] )
lowercase__ : int = int(config['''batch_size'''] )
set_seed(UpperCAmelCase )
lowercase__ , lowercase__ : str = get_dataloaders(UpperCAmelCase , UpperCAmelCase )
lowercase__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
lowercase__ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
lowercase__ : Any = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : List[str] = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[Any] = AdamW(params=model.parameters() , lr=UpperCAmelCase )
# Instantiate scheduler
lowercase__ : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowercase__ : Optional[Any] = os.path.split(UpperCAmelCase )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCAmelCase , UpperCAmelCase )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowercase__ : str = 0
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : List[str] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowercase__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCAmelCase , references=UpperCAmelCase , )
lowercase__ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(UpperCAmelCase ),
'''epoch''': epoch,
} , step=UpperCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=UpperCAmelCase , default=UpperCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=UpperCAmelCase , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
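# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the tracking additions
# boil down to four calls on the Accelerator. A minimal skeleton, assuming at
# least one tracker backend (e.g. tensorboard) is installed:
#
#   accelerator = Accelerator(log_with="all", project_dir="logs")
#   accelerator.init_trackers("my_run", config={"lr": 2e-5})
#   for step in range(10):
#       accelerator.log({"train_loss": 0.1 * (10 - step)}, step=step)
#   accelerator.end_training()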
| 198 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__lowercase= model_type_to_module_name(lowercase__ )
__lowercase= importlib.import_module(F'.{module_name}' , 'transformers.models' )
try:
return getattr(lowercase__ , lowercase__ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(lowercase__ , '__name__' , lowercase__ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__lowercase= importlib.import_module('transformers' )
if hasattr(lowercase__ , lowercase__ ):
return getattr(lowercase__ , lowercase__ )
return None
def _lowerCamelCase( lowercase__ , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , **lowercase__ , ) -> List[str]:
'''simple docstring'''
__lowercase= get_file_from_repo(
lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(lowercase__ , encoding='utf-8' ) as reader:
return json.load(lowercase__ )
class A :
def __init__(self ):
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase )
def _A (cls , lowerCAmelCase , **lowerCAmelCase ):
__lowercase= kwargs.pop('config' , lowerCAmelCase )
__lowercase= kwargs.pop('trust_remote_code' , lowerCAmelCase )
__lowercase= True
__lowercase, __lowercase= FeatureExtractionMixin.get_feature_extractor_dict(lowerCAmelCase , **lowerCAmelCase )
__lowercase= config_dict.get('feature_extractor_type' , lowerCAmelCase )
__lowercase= None
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
__lowercase= config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= AutoConfig.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
# It could be in `config.feature_extractor_type``
__lowercase= getattr(lowerCAmelCase , 'feature_extractor_type' , lowerCAmelCase )
if hasattr(lowerCAmelCase , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
__lowercase= config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
__lowercase= feature_extractor_class_from_name(lowerCAmelCase )
__lowercase= feature_extractor_auto_map is not None
__lowercase= feature_extractor_class is not None or type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING
__lowercase= resolve_trust_remote_code(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if has_remote_code and trust_remote_code:
__lowercase= get_class_from_dynamic_module(
lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
__lowercase= kwargs.pop('code_revision' , lowerCAmelCase )
if os.path.isdir(lowerCAmelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCAmelCase , **lowerCAmelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCAmelCase , **lowerCAmelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING:
__lowercase= FEATURE_EXTRACTOR_MAPPING[type(lowerCAmelCase )]
return feature_extractor_class.from_dict(lowerCAmelCase , **lowerCAmelCase )
raise ValueError(
f'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
f'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def _A (lowerCAmelCase , lowerCAmelCase ):
FEATURE_EXTRACTOR_MAPPING.register(lowerCAmelCase , lowerCAmelCase )
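# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the usual entry point
# for the class above. The checkpoint's config selects the concrete extractor
# from FEATURE_EXTRACTOR_MAPPING:
#
#   from transformers import AutoFeatureExtractor
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor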
| 304 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
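# ---------------------------------------------------------------------------
# Illustrative check (not part of the original module): the normalization in
# _np_extract_fbank_features shifts the log-mel values down by 20 dB and then
# squashes an 80 dB window onto [-1, 1]:
#
#   import numpy as np
#   x_db = np.array([-80.0, -60.0, -20.0, 0.0])
#   print(np.clip((x_db - 20.0) / 40.0, -2.0, 0.0) + 1.0)  # [-1. -1.  0.  0.5]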
| 304 | 1 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase ( lowerCamelCase__ ):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case , """embed_dim""" ) )
self.parent.assertTrue(hasattr(_snake_case , """num_heads""" ) )
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=64 , _snake_case=3 , _snake_case=[16, 48, 96] , _snake_case=[1, 3, 6] , _snake_case=[1, 2, 10] , _snake_case=[7, 3, 3] , _snake_case=[4, 2, 2] , _snake_case=[2, 1, 1] , _snake_case=[2, 2, 2] , _snake_case=[False, False, True] , _snake_case=[0.0, 0.0, 0.0] , _snake_case=0.02 , _snake_case=1e-12 , _snake_case=True , _snake_case=True , _snake_case=2 , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_sizes
_lowerCAmelCase = patch_stride
_lowerCAmelCase = patch_padding
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = num_heads
_lowerCAmelCase = stride_kv
_lowerCAmelCase = depth
_lowerCAmelCase = cls_token
_lowerCAmelCase = attention_drop_rate
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
# create a random int32 tensor of given shape
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFCvtModel(config=_snake_case )
_lowerCAmelCase = model(_snake_case , training=_snake_case )
_lowerCAmelCase = (self.image_size, self.image_size)
_lowerCAmelCase , _lowerCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_lowerCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_lowerCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFCvtForImageClassification(_snake_case )
_lowerCAmelCase = model(_snake_case , labels=_snake_case , training=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__lowerCamelCase = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFCvtModelTester(self )
_lowerCAmelCase = TFCvtConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def snake_case ( self ):
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def snake_case ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(_snake_case )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_snake_case )
_lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _snake_case )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
_lowerCAmelCase = model_class(_snake_case )
_lowerCAmelCase = model(**self._prepare_for_class(_snake_case , _snake_case ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFCvtModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_snake_case , return_tensors="""tf""" )
# forward pass
_lowerCAmelCase = model(**_snake_case )
# verify the logits
_lowerCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _snake_case )
_lowerCAmelCase = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _snake_case , atol=1e-4 ) )
| 82 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int]=1_3 ,lowercase__ : Any=7 ,lowercase__ : Union[str, Any]=True ,lowercase__ : Optional[int]=True ,lowercase__ : List[str]=True ,lowercase__ : str=True ,lowercase__ : Dict=9_9 ,lowercase__ : Union[str, Any]=3_2 ,lowercase__ : List[str]=5 ,lowercase__ : int=4 ,lowercase__ : Dict=3_7 ,lowercase__ : Union[str, Any]="gelu" ,lowercase__ : str=0.1 ,lowercase__ : List[str]=0.1 ,lowercase__ : Any=5_1_2 ,lowercase__ : Optional[int]=1_6 ,lowercase__ : Optional[int]=2 ,lowercase__ : Optional[int]=0.0_2 ,lowercase__ : Dict=False ,lowercase__ : Optional[int]=True ,lowercase__ : str="None" ,lowercase__ : Optional[int]=3 ,lowercase__ : List[Any]=4 ,lowercase__ : Union[str, Any]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = relative_attention
__lowercase = position_biased_input
__lowercase = pos_att_type
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return DebertaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def SCREAMING_SNAKE_CASE ( self : List[str] ):
        config = self.get_config()
        config.vocab_size = 3_0_0
return config
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[Any] ):
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Union[str, Any] ):
__lowercase = DebertaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )[0]
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )[0]
__lowercase = model(lowercase__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : int ,lowercase__ : Tuple ,lowercase__ : int ):
__lowercase = DebertaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = DebertaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.num_labels
__lowercase = DebertaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : List[str] ):
__lowercase = DebertaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.prepare_config_and_inputs()
        (__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = DebertaModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = DebertaModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def SCREAMING_SNAKE_CASE ( self : str ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
__lowercase = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
# compare the actual values for a slice.
__lowercase = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) ,F"{output[:, 1:4, 1:4]}" )
| 104 | 0 |
'''simple docstring'''
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_lowerCAmelCase = open # noqa: we just need to have a builtin inside this module to test it properly
| 367 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
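# Only the configuration is unconditionally importable; vision- and torch-dependent entries are added below when those backends are available.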
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["GLPNFeatureExtractor"]
_lowerCAmelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
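    # Static type checkers see the real imports here; at runtime the lazy module set up below is used instead.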
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
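    # Defer the heavy submodule imports: _LazyModule resolves the names in _import_structure on first attribute access.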
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 98 | 0 |