code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
from collections.abc import Iterable
from typing import Generic, TypeVar
_snake_case : Dict = TypeVar("_T")
class a (Generic[_T] ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase : Iterable[_T] | None = None ) -> None:
__snake_case : list[_T] = list(iterable or [] )
__snake_case : list[_T] = []
def __len__( self : Optional[int] ) -> int:
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Tuple ) -> str:
return F'Queue({tuple(self._stacka[::-1] + self._stacka )})'
def __snake_case ( self : Tuple , lowerCamelCase : _T ) -> None:
self._stacka.append(lowerCamelCase )
def __snake_case ( self : str ) -> _T:
__snake_case : List[str] = self._stacka.pop
__snake_case : int = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 81 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__UpperCAmelCase = '''src/transformers'''
__UpperCAmelCase = '''docs/source/en/tasks'''
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
UpperCamelCase : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
UpperCamelCase : Optional[Any] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
__UpperCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__UpperCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide]
UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
UpperCamelCase : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 82 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : int = IFPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ) -> str:
return self._get_dummy_components()
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def snake_case_ ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case_ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_local()
def snake_case_ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def snake_case_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
# if
UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa )
UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase : int = None
UpperCamelCase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCamelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 40 | 0 |
"""simple docstring"""
def snake_case_ ( A_ : int, A_ : list ):
'''simple docstring'''
_enforce_args(A_, A_ )
if n == 0:
return 0
_lowerCamelCase : Tuple = float('''-inf''' )
for i in range(1, n + 1 ):
_lowerCamelCase : List[Any] = max(
A_, prices[i - 1] + naive_cut_rod_recursive(n - i, A_ ) )
return max_revue
def snake_case_ ( A_ : int, A_ : list ):
'''simple docstring'''
_enforce_args(A_, A_ )
_lowerCamelCase : Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(A_, A_, A_ )
def snake_case_ ( A_ : int, A_ : list, A_ : list ):
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_lowerCamelCase : Tuple = float('''-inf''' )
for i in range(1, n + 1 ):
_lowerCamelCase : int = max(
A_, prices[i - 1] + _top_down_cut_rod_recursive(n - i, A_, A_ ), )
_lowerCamelCase : int = max_revenue
return max_rev[n]
def snake_case_ ( A_ : int, A_ : list ):
'''simple docstring'''
_enforce_args(A_, A_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
_lowerCamelCase : Union[str, Any] = [float('''-inf''' ) for _ in range(n + 1 )]
_lowerCamelCase : Dict = 0
for i in range(1, n + 1 ):
_lowerCamelCase : Dict = max_rev[i]
for j in range(1, i + 1 ):
_lowerCamelCase : Optional[Any] = max(A_, prices[j - 1] + max_rev[i - j] )
_lowerCamelCase : Tuple = max_revenue_i
return max_rev[n]
def snake_case_ ( A_ : int, A_ : list ):
'''simple docstring'''
if n < 0:
_lowerCamelCase : List[Any] = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(A_ )
if n > len(A_ ):
_lowerCamelCase : Union[str, Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F'''Got n = {n} but length of prices = {len(A_ )}'''
)
raise ValueError(A_ )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : int = [6, 10, 12, 15, 20, 23]
_lowerCamelCase : int = len(A_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_lowerCamelCase : Any = 36
_lowerCamelCase : Optional[int] = top_down_cut_rod(A_, A_ )
_lowerCamelCase : int = bottom_up_cut_rod(A_, A_ )
_lowerCamelCase : List[Any] = naive_cut_rod_recursive(A_, A_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 83 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase ( snake_case__ : Tuple="" ) -> str:
UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) )
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' )
sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 )
UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) )
UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = 'Hey!'
UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
from itertools import product
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = sides_number
lowercase = max_face_number * dice_number
lowercase = [0] * (max_total + 1)
lowercase = 1
lowercase = range(__SCREAMING_SNAKE_CASE , max_face_number + 1 )
for dice_numbers in product(__SCREAMING_SNAKE_CASE , repeat=__SCREAMING_SNAKE_CASE ):
lowercase = sum(__SCREAMING_SNAKE_CASE )
totals_frequencies[total] += 1
return totals_frequencies
def UpperCAmelCase_ ( ):
lowercase = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowercase = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowercase = 0
lowercase = 9
lowercase = 4 * 9
lowercase = 6
for peter_total in range(__SCREAMING_SNAKE_CASE , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowercase = (4**9) * (6**6)
lowercase = peter_wins_count / total_games_number
lowercase = round(__SCREAMING_SNAKE_CASE , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84 |
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : int = [1]
for i in range(2 , snake_case__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCamelCase : List[Any] = []
UpperCamelCase : List[Any] = list(range(snake_case__ ) )
# Find permutation
while factorials:
UpperCamelCase : int = factorials.pop()
UpperCamelCase , UpperCamelCase : int = divmod(snake_case__ , snake_case__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE__ : int = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def _a ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Optional[int]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE__ : Any = ''
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : int = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Any = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : int = in_proj_bias[-config.hidden_size :]
def _a ( lowercase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def _a ( lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = dct.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = val
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE__ : Optional[int] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def _a ( lowercase__ : Union[str, Any] , lowercase__ : List[str] , lowercase__ : int=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = BitConfig(
global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=lowercase__ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTHybridConfig(backbone_config=lowercase__ , image_size=3_84 , num_labels=10_00 )
SCREAMING_SNAKE_CASE__ : Any = False
# load original model from timm
SCREAMING_SNAKE_CASE__ : str = timm.create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = create_rename_keys(lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : Any = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : List[Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Optional[Any] = idalabel
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE__ : int = ViTHybridModel(lowercase__ ).eval()
else:
SCREAMING_SNAKE_CASE__ : Tuple = ViTHybridForImageClassification(lowercase__ ).eval()
model.load_state_dict(lowercase__ )
# create image processor
SCREAMING_SNAKE_CASE__ : List[Any] = create_transform(**resolve_data_config({} , model=lowercase__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = transform.transforms
SCREAMING_SNAKE_CASE__ : List[Any] = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = ViTHybridImageProcessor(
do_resize=lowercase__ , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase__ , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=lowercase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE__ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE__ : Any = transform(lowercase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(lowercase__ , lowercase__ )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
SCREAMING_SNAKE_CASE__ : Optional[int] = timm_model.forward_features(lowercase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase__ , outputs.pooler_output , atol=1E-3 )
else:
SCREAMING_SNAKE_CASE__ : Dict = timm_model(lowercase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 85 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( a__ ):
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) )
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any:
UpperCamelCase : int = parent
UpperCamelCase : int = batch_size
UpperCamelCase : List[Any] = image_size
UpperCamelCase : List[str] = patch_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 )
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : Optional[int] = conv_kernel_size
UpperCamelCase : List[str] = output_stride
UpperCamelCase : Union[str, Any] = classifier_dropout_prob
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Any = is_training
UpperCamelCase : int = num_labels
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Tuple = scope
UpperCamelCase : List[str] = width_multiplier
UpperCamelCase : Any = ffn_dropout
UpperCamelCase : List[Any] = attn_dropout
def snake_case_ ( self ) -> int:
UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[str] = None
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ) -> int:
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Optional[int] = self.num_labels
UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = MobileViTVaModelTester(self )
UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case_ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Tuple = outputs.hidden_states
UpperCamelCase : Dict = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase : Any = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Tuple:
UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> str:
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
], device=SCREAMING_SNAKE_CASE_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = outputs.logits.detach().cpu()
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] )
UpperCamelCase : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__a :Tuple = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
__a :List[Any] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
__a :List[str] = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
__a :List[str] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
__a :str = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Dict ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]=[1, 10, 100] , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=3.0 ):
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=UpperCAmelCase ) as executor:
A_ = []
A_ = Counter()
A_ = 0
A_ = defaultdict(UpperCAmelCase )
for task_id, (candidates, test_case) in enumerate(zip(UpperCAmelCase , UpperCAmelCase ) ):
for candidate in candidates:
A_ = candidate + "\n" + test_case
A_ = (test_program, timeout, task_id, completion_id[task_id])
A_ = executor.submit(UpperCAmelCase , *UpperCAmelCase )
futures.append(UpperCAmelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(UpperCAmelCase ):
A_ = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
A_ , A_ = [], []
for result in results.values():
result.sort()
A_ = [r[1]["passed"] for r in result]
total.append(len(UpperCAmelCase ) )
correct.append(sum(UpperCAmelCase ) )
A_ = np.array(UpperCAmelCase )
A_ = np.array(UpperCAmelCase )
A_ = k
A_ = {f'''pass@{k}''': estimate_pass_at_k(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
def estimator(__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) )
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = itertools.repeat(__UpperCamelCase ,len(__UpperCamelCase ) )
else:
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = iter(__UpperCamelCase )
return np.array([estimator(int(__UpperCamelCase ) ,int(__UpperCamelCase ) ,__UpperCamelCase ) for n, c in zip(__UpperCamelCase ,__UpperCamelCase )] )
| 86 |
def UpperCamelCase ( snake_case__ : Optional[int] ) -> str:
UpperCamelCase : List[str] = [0] * len(snake_case__ )
UpperCamelCase : int = []
UpperCamelCase : Optional[int] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCamelCase : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 40 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
if openai_config_file == "":
A__ = OpenAIGPTConfig()
else:
A__ = OpenAIGPTConfig.from_json_file(lowercase_ )
A__ = OpenAIGPTModel(lowercase_ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(lowercase_ , lowercase_ , lowercase_ )
# Save pytorch-model
A__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
A__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , lowercase_ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 87 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class lowercase__ ( A_ ):
__UpperCAmelCase = '''ctrl'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , SCREAMING_SNAKE_CASE=24_6534 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=1280 , SCREAMING_SNAKE_CASE=8192 , SCREAMING_SNAKE_CASE=48 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> List[str]:
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Tuple = n_positions
_lowerCamelCase : Tuple = n_embd
_lowerCamelCase : List[Any] = n_layer
_lowerCamelCase : Union[str, Any] = n_head
_lowerCamelCase : Tuple = dff
_lowerCamelCase : List[Any] = resid_pdrop
_lowerCamelCase : Tuple = embd_pdrop
_lowerCamelCase : Tuple = layer_norm_epsilon
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : str = use_cache
super().__init__(**SCREAMING_SNAKE_CASE)
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
import torch
from diffusers import StableDiffusionPipeline
SCREAMING_SNAKE_CASE : List[str] = "path-to-your-trained-model"
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
SCREAMING_SNAKE_CASE : List[Any] = "A photo of sks dog in a bucket"
SCREAMING_SNAKE_CASE : Dict = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 89 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = XLNetTokenizer
lowercase__ : int = XLNetTokenizerFast
lowercase__ : Optional[int] = True
lowercase__ : Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = '''<s>'''
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<eod>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_06 )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
lowerCAmelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
| 90 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ViTFeatureExtractor''']
__UpperCAmelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
"""simple docstring"""
import copy
import re
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str = '''hp'''
_lowerCamelCase: List[Any] = {}
_lowerCamelCase: List[Any] = None
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple:
A = prefix
A = defaults
cls.build_naming_info()
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int:
if len(A_ ) == 0:
return ""
A = None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 ,len(A_ ) + 1 ):
A = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
A = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(A_ : Optional[Any] ):
A = ''
while integer != 0:
A = chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
A = 0
while True:
A = word + '#' + int_to_alphabetic(A_ )
if sword in info["reverse_short_word"]:
continue
else:
A = sword
break
A = short_word
A = word
return short_word
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]:
A = param_name.split('_' )
A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
A = ['', '_']
for separator in separators:
A = separator.join(A_ )
if shortname not in info["reverse_short_param"]:
A = shortname
A = param_name
return shortname
return param_name
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple:
A = TrialShortNamer.shortname_for_key(A_ ,A_ )
A = short_name
A = param_name
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]:
if cls.NAMING_INFO is not None:
return
A = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
A = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(A_ ,A_ )
A = info
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
A = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
A = cls.NAMING_INFO['short_param'][k]
if isinstance(A_ ,A_ ):
A = 1 if v else 0
A = '' if isinstance(A_ ,(int, float) ) else '-'
A = F'{key}{sep}{v}'
name.append(A_ )
return "_".join(A_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int:
A = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
A = []
else:
A = repr.split('_' )
A = {}
for value in values:
if "-" in value:
A , A = value.split('-' )
else:
A = re.sub('[0-9.]' ,'' ,A_ )
A = float(re.sub('[^0-9.]' ,'' ,A_ ) )
A = cls.NAMING_INFO['reverse_short_param'][p_k]
A = p_v
for k in cls.DEFAULTS:
if k not in parameters:
A = cls.DEFAULTS[k]
return parameters
| 91 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
| 40 | 0 |
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : int ) -> int:
lowercase : Optional[Any] =1
lowercase : Union[str, Any] =True
for v in tree[start]:
if v not in visited:
ret += dfs(__magic_name__ )
if ret % 2 == 0:
cuts.append(__magic_name__ )
return ret
def _lowerCAmelCase ( ) -> int:
dfs(1 )
if __name__ == "__main__":
UpperCamelCase_ , UpperCamelCase_ = 10, 9
UpperCamelCase_ = defaultdict(list)
UpperCamelCase_ = {}
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 92 |
def UpperCamelCase ( snake_case__ : int ) -> str:
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__A = logging.get_logger(__name__)
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
"""simple docstring"""
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Dict = to_pil_image(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = pil_image.size
lowerCAmelCase__ :Dict = pytesseract.image_to_data(_SCREAMING_SNAKE_CASE , lang=_SCREAMING_SNAKE_CASE , output_type='dict' , config=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
lowerCAmelCase__ :List[str] = [idx for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if not word.strip()]
lowerCAmelCase__ :Optional[Any] = [word for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
lowerCAmelCase__ :List[str] = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
lowerCAmelCase__ :str = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
lowerCAmelCase__ :List[str] = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
lowerCAmelCase__ :int = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowerCAmelCase__ :List[Any] = []
for x, y, w, h in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Any = [x, y, x + w, y + h]
actual_boxes.append(_SCREAMING_SNAKE_CASE )
# finally, normalize the bounding boxes
lowerCAmelCase__ :str = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Optional[int] = ["""pixel_values"""]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 2_5_5 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = "" , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase__ :Union[str, Any] = get_size_dict(__UpperCAmelCase )
lowerCAmelCase__ :int = do_resize
lowerCAmelCase__ :Union[str, Any] = size
lowerCAmelCase__ :List[str] = resample
lowerCAmelCase__ :int = do_rescale
lowerCAmelCase__ :int = rescale_value
lowerCAmelCase__ :List[Any] = do_normalize
lowerCAmelCase__ :Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ :List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCAmelCase__ :Tuple = apply_ocr
lowerCAmelCase__ :List[str] = ocr_lang
lowerCAmelCase__ :Any = tesseract_config
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
lowerCAmelCase__ :Tuple = (size['height'], size['width'])
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ :Tuple = size if size is not None else self.size
lowerCAmelCase__ :str = get_size_dict(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = resample if resample is not None else self.resample
lowerCAmelCase__ :int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ :str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ :str = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ :str = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ :List[Any] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ :List[str] = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase__ :Union[str, Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase__ :Union[str, Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCAmelCase__ :str = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
lowerCAmelCase__ :Optional[Any] = [to_numpy_array(__UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract' )
lowerCAmelCase__ :Union[str, Any] = []
lowerCAmelCase__ :Dict = []
for image in images:
lowerCAmelCase__ , lowerCAmelCase__ :Dict = apply_tesseract(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
words_batch.append(__UpperCAmelCase )
boxes_batch.append(__UpperCAmelCase )
if do_resize:
lowerCAmelCase__ :Optional[int] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ :List[Any] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ :List[Any] = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
lowerCAmelCase__ :Tuple = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
lowerCAmelCase__ :List[str] = BatchFeature(data={'pixel_values': images} , tensor_type=__UpperCAmelCase )
if apply_ocr:
lowerCAmelCase__ :int = words_batch
lowerCAmelCase__ :Tuple = boxes_batch
return data
| 93 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
UpperCamelCase : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Dict = new_module
UpperCamelCase : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCamelCase : Union[str, Any] = tensor_name in module._buffers
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
else:
UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[Any] = value.to('cpu' )
if value.dtype == torch.inta:
UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
UpperCamelCase : Union[str, Any] = new_value.T
UpperCamelCase : Union[str, Any] = old_value.__dict__
if is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
elif is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
UpperCamelCase : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[str] = value.to(snake_case__ )
else:
UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
UpperCamelCase : Optional[int] = new_value
else:
UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
UpperCamelCase : List[str] = new_value
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int:
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase : str = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape
else:
UpperCamelCase : Any = module.in_features
UpperCamelCase : List[str] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase : Any = bnb.nn.LinearabitLt(
snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
UpperCamelCase : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase : str = bnb.nn.Linearabit(
snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
UpperCamelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase : Any = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase : List[str] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] )
UpperCamelCase : Optional[int] = len(snake_case__ ) > 0
# Check if it is a base model
UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : List[Any] = list(model.named_children() )
UpperCamelCase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ )
UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
UpperCamelCase : Tuple = ['.weight', '.bias']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
| 40 | 0 |
'''simple docstring'''
def lowercase_ ( __A : int ) -> int:
"""simple docstring"""
lowercase : Dict =1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowercase_ ( __A : int ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] =0
while number > 0:
lowercase : List[Any] =number % 1_0
sum_of_digits += last_digit
lowercase : Tuple =number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def lowercase_ ( __A : int = 1_0_0 ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] =factorial(__A )
lowercase : int =split_and_add(__A )
return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 94 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( snake_case__ : int ) -> Dict:
UpperCamelCase : Optional[Any] = tmp_path / 'file.csv'
UpperCamelCase : Optional[Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]:
UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv'
UpperCamelCase : Any = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str:
UpperCamelCase : Any = tmp_path / 'csv_with_image.csv'
UpperCamelCase : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv'
UpperCamelCase : Dict = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv'
UpperCamelCase : Union[str, Any] = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]:
UpperCamelCase : str = Csv()
UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case__ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : List[str] = f.read().splitlines()[1]
UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
UpperCamelCase : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( snake_case__ : Any ) -> str:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : Any = f.read().splitlines()[1:]
UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
UpperCamelCase : List[str] = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(snake_case__ ) for label in labels]
def UpperCamelCase ( snake_case__ : str ) -> List[Any]:
UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda snake_case__ : [int(snake_case__ ) for i in x.split()]} )
UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
UpperCamelCase : str = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 40 | 0 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCamelCase_ = TypeVar('''T''')
class UpperCamelCase_ (Generic[T] ):
def __init__( self : Dict , lowerCAmelCase_ : bool = True ) -> None:
UpperCAmelCase_ : dict[T, list[T]] = {} # dictionary of lists
UpperCAmelCase_ : List[str] = directed
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : T , lowerCAmelCase_ : T ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
self.adj_list[destination_vertex].append(lowerCAmelCase_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCAmelCase_ )
UpperCAmelCase_ : Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
UpperCAmelCase_ : int = [destination_vertex]
UpperCAmelCase_ : str = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
UpperCAmelCase_ : str = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
UpperCAmelCase_ : Dict = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
UpperCAmelCase_ : Dict = [destination_vertex]
UpperCAmelCase_ : Any = []
return self
def __repr__( self : int ) -> str:
return pformat(self.adj_list )
| 95 |
import math
import random
def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
__UpperCAmelCase = 0.02
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float:
UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCamelCase : int = (expected / 100) - layer_a
# Error delta
UpperCamelCase : List[str] = layer_1_error * sigmoid_function(snake_case__ , snake_case__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input('''Expected value: '''))
__UpperCAmelCase = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 40 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def a ( __UpperCAmelCase : Iterable[str] , __UpperCAmelCase : int ) -> Generator[tuple[str, ...], None, None]:
__magic_name__: Optional[Any] = iter(__UpperCAmelCase )
while True:
__magic_name__: Tuple = tuple(itertools.islice(__UpperCAmelCase , __UpperCAmelCase ) )
if not chunk:
return
yield chunk
def a ( __UpperCAmelCase : str ) -> str:
__magic_name__: str = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
__magic_name__: Optional[Any] = """"""
if len(__UpperCAmelCase ) < 2:
return dirty
for i in range(len(__UpperCAmelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(__UpperCAmelCase ) & 1:
clean += "X"
return clean
def a ( __UpperCAmelCase : str ) -> list[str]:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
__magic_name__: Dict = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
__magic_name__: List[str] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(__UpperCAmelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(__UpperCAmelCase )
return table
def a ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> str:
__magic_name__: Optional[Any] = generate_table(__UpperCAmelCase )
__magic_name__: Optional[int] = prepare_input(__UpperCAmelCase )
__magic_name__: Optional[Any] = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__UpperCAmelCase , 2 ):
__magic_name__, __magic_name__: List[Any] = divmod(table.index(__UpperCAmelCase ) , 5 )
__magic_name__, __magic_name__: Tuple = divmod(table.index(__UpperCAmelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def a ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> str:
__magic_name__: int = generate_table(__UpperCAmelCase )
__magic_name__: Tuple = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__UpperCAmelCase , 2 ):
__magic_name__, __magic_name__: List[Any] = divmod(table.index(__UpperCAmelCase ) , 5 )
__magic_name__, __magic_name__: Any = divmod(table.index(__UpperCAmelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 96 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[int]:
return EnvironmentCommand()
class lowerCAmelCase_ ( a__ ):
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : List[Any] = parser.add_parser('env' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = huggingface_hub.__version__
UpperCamelCase : int = 'not installed'
UpperCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
UpperCamelCase : Any = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : Dict = 'not installed'
if is_transformers_available():
import transformers
UpperCamelCase : str = transformers.__version__
UpperCamelCase : Optional[Any] = 'not installed'
if is_accelerate_available():
import accelerate
UpperCamelCase : Dict = accelerate.__version__
UpperCamelCase : List[str] = 'not installed'
if is_xformers_available():
import xformers
UpperCamelCase : List[str] = xformers.__version__
UpperCamelCase : Dict = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(SCREAMING_SNAKE_CASE_ ) )
return info
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 40 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
__a = (7_2_0, 1_2_8_0) # Height, Width
__a = (0.4, 0.6) # if height or width lower than this scale, drop it.
__a = 1 / 1_0_0
__a = ''
__a = ''
__a = ''
__a = 2_5_0
def a ( ):
'''simple docstring'''
lowercase_ , lowercase_ = get_dataset(snake_case__ , snake_case__ )
for index in range(snake_case__ ):
lowercase_ = random.sample(range(len(snake_case__ ) ) , 4 )
lowercase_ , lowercase_ , lowercase_ = update_image_and_anno(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , filter_scale=snake_case__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase_ = random_chars(32 )
lowercase_ = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
lowercase_ = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowercase_ = []
for anno in new_annos:
lowercase_ = anno[3] - anno[1]
lowercase_ = anno[4] - anno[2]
lowercase_ = anno[1] + width / 2
lowercase_ = anno[2] + height / 2
lowercase_ = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(snake_case__ )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( snake_case__: str , snake_case__: str ):
'''simple docstring'''
lowercase_ = []
lowercase_ = []
for label_file in glob.glob(os.path.join(snake_case__ , '''*.txt''' ) ):
lowercase_ = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(snake_case__ ) as in_file:
lowercase_ = in_file.readlines()
lowercase_ = os.path.join(snake_case__ , F'''{label_name}.jpg''' )
lowercase_ = []
for obj_list in obj_lists:
lowercase_ = obj_list.rstrip('''\n''' ).split(''' ''' )
lowercase_ = float(obj[1] ) - float(obj[3] ) / 2
lowercase_ = float(obj[2] ) - float(obj[4] ) / 2
lowercase_ = float(obj[1] ) + float(obj[3] ) / 2
lowercase_ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case__ )
labels.append(snake_case__ )
return img_paths, labels
def a ( snake_case__: list , snake_case__: list , snake_case__: list[int] , snake_case__: tuple[int, int] , snake_case__: tuple[float, float] , snake_case__: float = 0.0 , ):
'''simple docstring'''
lowercase_ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowercase_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ = int(scale_x * output_size[1] )
lowercase_ = int(scale_y * output_size[0] )
lowercase_ = []
lowercase_ = []
for i, index in enumerate(snake_case__ ):
lowercase_ = all_img_list[index]
path_list.append(snake_case__ )
lowercase_ = all_annos[index]
lowercase_ = cva.imread(snake_case__ )
if i == 0: # top-left
lowercase_ = cva.resize(snake_case__ , (divid_point_x, divid_point_y) )
lowercase_ = img
for bbox in img_annos:
lowercase_ = bbox[1] * scale_x
lowercase_ = bbox[2] * scale_y
lowercase_ = bbox[3] * scale_x
lowercase_ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowercase_ = cva.resize(snake_case__ , (output_size[1] - divid_point_x, divid_point_y) )
lowercase_ = img
for bbox in img_annos:
lowercase_ = scale_x + bbox[1] * (1 - scale_x)
lowercase_ = bbox[2] * scale_y
lowercase_ = scale_x + bbox[3] * (1 - scale_x)
lowercase_ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowercase_ = cva.resize(snake_case__ , (divid_point_x, output_size[0] - divid_point_y) )
lowercase_ = img
for bbox in img_annos:
lowercase_ = bbox[1] * scale_x
lowercase_ = scale_y + bbox[2] * (1 - scale_y)
lowercase_ = bbox[3] * scale_x
lowercase_ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowercase_ = cva.resize(
snake_case__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowercase_ = img
for bbox in img_annos:
lowercase_ = scale_x + bbox[1] * (1 - scale_x)
lowercase_ = scale_y + bbox[2] * (1 - scale_y)
lowercase_ = scale_x + bbox[3] * (1 - scale_x)
lowercase_ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowercase_ = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def a ( snake_case__: int ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
lowercase_ = ascii_lowercase + digits
return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 97 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__UpperCAmelCase = {
'''facebook/xglm-564M''': 2_048,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase : Any = 7
UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase : Optional[int] = len(self.sp_model )
UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
UpperCamelCase : int = self.__dict__.copy()
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
UpperCamelCase : Any = {}
UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCamelCase : Optional[int] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case_ ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip()
return out_string
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi:
UpperCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 40 | 0 |
'''simple docstring'''
lowercase__ : List[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowercase__ : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowercase__ : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 98 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Dict = RobertaTokenizer
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) )
UpperCamelCase : List[str] = add_prefix_space
UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = add_prefix_space
UpperCamelCase : Optional[Any] = 'post_processor'
UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase : Optional[Any] = tuple(state['sep'] )
if "cls" in state:
UpperCamelCase : Optional[int] = tuple(state['cls'] )
UpperCamelCase : Any = False
if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Optional[int] = add_prefix_space
UpperCamelCase : List[Any] = True
if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCamelCase : Dict = trim_offsets
UpperCamelCase : Union[str, Any] = True
if changes_to_apply:
UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) )
UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value
UpperCamelCase : List[Any] = value
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple:
UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 40 | 0 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
__a = """laion/clap-htsat-unfused"""
__a = tempfile.mkdtemp()
def snake_case_ ( self , **__A ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def snake_case_ ( self , **__A ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def snake_case_ ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self ):
__a = self.get_tokenizer()
__a = self.get_feature_extractor()
__a = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
__a = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def snake_case_ ( self ):
__a = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__a = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
__a = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def snake_case_ ( self ):
__a = self.get_feature_extractor()
__a = self.get_tokenizer()
__a = ClapProcessor(tokenizer=__A , feature_extractor=__A )
__a = floats_list((3, 1000) )
__a = feature_extractor(__A , return_tensors="""np""" )
__a = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case_ ( self ):
__a = self.get_feature_extractor()
__a = self.get_tokenizer()
__a = ClapProcessor(tokenizer=__A , feature_extractor=__A )
__a = """This is a test string"""
__a = processor(text=__A )
__a = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self ):
__a = self.get_feature_extractor()
__a = self.get_tokenizer()
__a = ClapProcessor(tokenizer=__A , feature_extractor=__A )
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(__A )
__a = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def snake_case_ ( self ):
__a = self.get_feature_extractor()
__a = self.get_tokenizer()
__a = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 99 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCamelCase : str = {}
if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
UpperCamelCase : List[str] = {'dtype': torch.intaa}
elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
UpperCamelCase : int = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ):
UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
UpperCamelCase : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
| 40 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = 13
SCREAMING_SNAKE_CASE__ = 7
SCREAMING_SNAKE_CASE__ = 30
SCREAMING_SNAKE_CASE__ = self.seq_length + self.mem_len
SCREAMING_SNAKE_CASE__ = 15
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = 99
SCREAMING_SNAKE_CASE__ = [10, 50, 80]
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 1_28
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = self.vocab_size - 1
SCREAMING_SNAKE_CASE__ = 0.01
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowercase_ ( self ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowercase_ ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLModel(A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids_a, '''mems''': mems_a}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase_ ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLLMHeadModel(A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model([input_ids_a, mems_a] ).to_tuple()
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase_ ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLForSequenceClassification(A_ )
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase__ : List[str] = () if is_tf_available() else ()
lowerCamelCase__ : List[Any] = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[Any] = False
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , d_embed=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ):
'''simple docstring'''
self.model_tester.set_seed()
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
self.model_tester.set_seed()
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(A_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
assert isinstance(A_ , tf.keras.layers.Layer )
SCREAMING_SNAKE_CASE__ = model.get_bias()
assert name is None
else:
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
assert x is None
SCREAMING_SNAKE_CASE__ = model.get_bias()
assert name is None
def lowercase_ ( self ):
'''simple docstring'''
pass
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFTransfoXLModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def lowercase_ ( self ):
'''simple docstring'''
pass
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
SCREAMING_SNAKE_CASE__ = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
SCREAMING_SNAKE_CASE__ = model.generate(A_ , max_length=2_00 , do_sample=A_ )
self.assertListEqual(output_ids[0].numpy().tolist() , A_ )
| 100 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
UpperCamelCase : int = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(snake_case__ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCamelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(snake_case__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCamelCase : Dict = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(snake_case__ )
UpperCamelCase : List[Any] = []
for value in value_array:
UpperCamelCase : Optional[Any] = euclidean(snake_case__ , dataset[0] )
UpperCamelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCamelCase : Union[str, Any] = euclidean(snake_case__ , snake_case__ )
if dist > temp_dist:
UpperCamelCase : str = temp_dist
UpperCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
from ...processing_utils import ProcessorMixin
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = """WhisperFeatureExtractor"""
_UpperCAmelCase = """WhisperTokenizer"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.feature_extractor
SCREAMING_SNAKE_CASE_ : List[Any] = False
def UpperCamelCase__ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=lowerCAmelCase__ , language=lowerCAmelCase__ , no_timestamps=lowerCAmelCase__ )
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop('audio' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = kwargs.pop('sampling_rate' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('text' , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
SCREAMING_SNAKE_CASE_ : List[Any] = args[0]
SCREAMING_SNAKE_CASE_ : List[Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(lowerCAmelCase__ , *lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = encodings['input_ids']
return inputs
def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__="np" ):
"""simple docstring"""
return self.tokenizer.get_prompt_ids(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
| 101 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__UpperCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__UpperCAmelCase = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__UpperCAmelCase = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__UpperCAmelCase = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__UpperCAmelCase = np.expand_dims(test_image, axis=0)
__UpperCAmelCase = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__UpperCAmelCase = '''Normal'''
if result[0][0] == 1:
__UpperCAmelCase = '''Abnormality detected'''
| 40 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ : Union[str, Any] = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Union[str, Any] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = '''us-east-1''' # defaults region
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : str
UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
UpperCAmelCase__ : Union[str, Any] = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000}
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ) -> str:
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def UpperCamelCase ( snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 40 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
snake_case = 2_5_0_0_0_4
snake_case = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : Tuple = MBartaaTokenizer
A__ : Dict = MBartaaTokenizerFast
A__ : int = True
A__ : str = True
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case = MBartaaTokenizer(__lowerCamelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = '''<s>'''
_snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__lowerCamelCase ) , 1_0_5_4 )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = MBartaaTokenizer(__lowerCamelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__lowerCamelCase )
_snake_case = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCamelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
_snake_case = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_snake_case = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
# fmt: off
_snake_case = {'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_snake_case = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
_snake_case = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(__lowerCamelCase )
_snake_case = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_snake_case = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(__lowerCamelCase )
_snake_case = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
_snake_case = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(__lowerCamelCase )
_snake_case = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
_snake_case = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(__lowerCamelCase )
_snake_case = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
A__ : Dict = '''facebook/mbart-large-50-one-to-many-mmt'''
A__ : Any = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
A__ : Dict = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
A__ : Optional[Any] = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def __UpperCAmelCase ( cls : List[str] ):
"""simple docstring"""
_snake_case = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_snake_case = 1
return cls
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 2_5_0_0_3_8 )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids )
_snake_case = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
_snake_case = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
_snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = ['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0] , __lowerCamelCase )
_snake_case = 1_0
_snake_case = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0]
self.assertEqual(ids[0] , __lowerCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = tempfile.mkdtemp()
_snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase )
_snake_case = MBartaaTokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase )
@require_torch
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors='''pt''' )
_snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors='''pt''' )
_snake_case = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0 , return_tensors='''pt''' )
_snake_case = targets['''input_ids''']
_snake_case = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , {
# en_XX, A, test, EOS
'''input_ids''': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , )
| 103 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__UpperCAmelCase = '''src/transformers'''
__UpperCAmelCase = '''docs/source/en/tasks'''
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
UpperCamelCase : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
UpperCamelCase : Optional[Any] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
__UpperCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__UpperCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide]
UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
UpperCamelCase : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> None:
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead." , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 104 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : int = IFPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ) -> str:
return self._get_dummy_components()
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def snake_case_ ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case_ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_local()
def snake_case_ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def snake_case_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
# if
UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa )
UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase : int = None
UpperCamelCase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCamelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 40 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mock.Mock()
SCREAMING_SNAKE_CASE_ : Optional[int] = 500
SCREAMING_SNAKE_CASE_ : List[Any] = {}
SCREAMING_SNAKE_CASE_ : Dict = HTTPError
SCREAMING_SNAKE_CASE_ : List[Any] = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE_ : Tuple = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case__ ) as mock_head:
SCREAMING_SNAKE_CASE_ : Dict = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def snake_case ( self ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mock.Mock()
SCREAMING_SNAKE_CASE_ : List[str] = 500
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : str = HTTPError
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case__ ) as mock_head:
SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case ( self ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = tempfile.mktemp()
with open(snake_case__ ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = AlbertTokenizer.from_pretrained(snake_case__ )
finally:
os.remove(snake_case__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def snake_case ( self ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE_ : List[str] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
__a : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def snake_case ( cls ):
SCREAMING_SNAKE_CASE_ : List[str] = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def snake_case ( cls ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Any = os.path.join(snake_case__ ,'vocab.txt' )
with open(snake_case__ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertTokenizer(snake_case__ )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : str = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ ,repo_id='test-tokenizer' ,push_to_hub=snake_case__ ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : str = os.path.join(snake_case__ ,'vocab.txt' )
with open(snake_case__ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : List[str] = BertTokenizer(snake_case__ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case__ ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case__ ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : Tuple = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def snake_case ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(snake_case__ ,'vocab.txt' )
with open(snake_case__ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = CustomTokenizer(snake_case__ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(snake_case__ ,'vocab.txt' )
with open(snake_case__ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : List[str] = BertTokenizerFast.from_pretrained(snake_case__ )
bert_tokenizer.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = CustomTokenizerFast.from_pretrained(snake_case__ )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE_ : Any = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case__ ,trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def snake_case ( self ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE_ : Tuple = Trie()
SCREAMING_SNAKE_CASE_ : str = trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case__ ,['AB', 'C'] )
| 105 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase ( snake_case__ : Tuple="" ) -> str:
UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) )
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' )
sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 )
UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) )
UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = 'Hey!'
UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : List[Any] ) -> Tuple:
super().__init__()
A = nn.Linear(3 , 4 )
A = nn.BatchNormad(4 )
A = nn.Linear(4 , 5 )
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : List[Any] ) -> str:
return self.lineara(self.batchnorm(self.lineara(__UpperCamelCase ) ) )
class lowerCAmelCase__ ( _lowerCamelCase ):
def __UpperCamelCase ( self : Any , __UpperCamelCase : Any , *__UpperCamelCase : Any , **__UpperCamelCase : List[str] ) -> Dict:
return (args[0] + 1,) + args[1:], kwargs
class lowerCAmelCase__ ( _lowerCamelCase ):
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ) -> str:
return output + 1
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ) -> Any:
A = ModelForTest()
A = ModelHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(test_model._hf_hook , __UpperCamelCase )
self.assertTrue(hasattr(__UpperCamelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(__UpperCamelCase )
self.assertFalse(hasattr(__UpperCamelCase , '_hf_hook' ) )
self.assertFalse(hasattr(__UpperCamelCase , '_old_forward' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
A = ModelForTest()
A = ModelHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase , append=__UpperCamelCase )
self.assertEqual(isinstance(test_model._hf_hook , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__UpperCamelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(__UpperCamelCase )
self.assertFalse(hasattr(__UpperCamelCase , '_hf_hook' ) )
self.assertFalse(hasattr(__UpperCamelCase , '_old_forward' ) )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
A = ModelForTest()
A = torch.randn(2 , 3 )
A = test_model(x + 1 )
A = test_model(x + 2 )
A = PreForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A = PreForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A = test_model(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
A = ModelForTest()
A = torch.randn(2 , 3 )
A = test_model(__UpperCamelCase )
A = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A = test_model(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , output + 2 , atol=1e-5 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
A = ModelForTest()
A = torch.randn(2 , 3 )
A = test_model(__UpperCamelCase )
A = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
A = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
A = True
A = test_model(__UpperCamelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
A = torch.randn(2 , 3 )
A = model(__UpperCamelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__UpperCamelCase , AlignDevicesHook(io_same_device=__UpperCamelCase ) )
A = torch.randn(2 , 3 ).to(0 )
A = model(__UpperCamelCase )
self.assertEqual(output.device , torch.device(0 ) )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
A = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
A = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCamelCase )
A = torch.randn(2 , 3 )
A = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
A = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
A = torch.randn(2 , 3 )
A = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
A = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
A = torch.device(__UpperCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCamelCase )
A = torch.randn(2 , 3 )
A = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase , offload_buffers=__UpperCamelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
A = torch.randn(2 , 3 )
A = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def __UpperCamelCase ( self : str ) -> List[Any]:
A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
A = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
A = torch.device(__UpperCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCamelCase )
A = torch.randn(2 , 3 )
A = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase , weights_map=model.state_dict() , offload_buffers=__UpperCamelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
A = torch.randn(2 , 3 )
A = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
| 106 |
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : int = [1]
for i in range(2 , snake_case__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCamelCase : List[Any] = []
UpperCamelCase : List[Any] = list(range(snake_case__ ) )
# Find permutation
while factorials:
UpperCamelCase : int = factorials.pop()
UpperCamelCase , UpperCamelCase : int = divmod(snake_case__ , snake_case__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = ReformerTokenizer
__lowerCAmelCase = ReformerTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
def __UpperCAmelCase ( self : Any ) -> List[Any]:
super().setUp()
_A = ReformerTokenizer(UpperCamelCase__, keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
_A = '<s>'
_A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ), UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ), UpperCamelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
_A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<unk>' )
self.assertEqual(vocab_keys[1], '<s>' )
self.assertEqual(vocab_keys[-1], 'j' )
self.assertEqual(len(UpperCamelCase__ ), 10_00 )
def __UpperCAmelCase ( self : str ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size, 10_00 )
def __UpperCAmelCase ( self : int ) -> int:
if not self.test_rust_tokenizer:
return
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = 'I was born in 92000, and this is falsé.'
_A = tokenizer.tokenize(UpperCamelCase__ )
_A = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
_A = tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
_A = rust_tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
_A = self.get_rust_tokenizer()
_A = tokenizer.encode(UpperCamelCase__ )
_A = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : int=15 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_A = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
# Simple input
_A = 'This is a simple input'
_A = ['This is a simple input 1', 'This is a simple input 2']
_A = ('This is a simple input', 'This is a pair')
_A = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(UpperCamelCase__, tokenizer_r.encode, UpperCamelCase__, max_length=UpperCamelCase__, padding='max_length' )
# Simple input
self.assertRaises(UpperCamelCase__, tokenizer_r.encode_plus, UpperCamelCase__, max_length=UpperCamelCase__, padding='max_length' )
# Simple input
self.assertRaises(
UpperCamelCase__, tokenizer_r.batch_encode_plus, UpperCamelCase__, max_length=UpperCamelCase__, padding='max_length', )
# Pair input
self.assertRaises(UpperCamelCase__, tokenizer_r.encode, UpperCamelCase__, max_length=UpperCamelCase__, padding='max_length' )
# Pair input
self.assertRaises(UpperCamelCase__, tokenizer_r.encode_plus, UpperCamelCase__, max_length=UpperCamelCase__, padding='max_length' )
# Pair input
self.assertRaises(
UpperCamelCase__, tokenizer_r.batch_encode_plus, UpperCamelCase__, max_length=UpperCamelCase__, padding='max_length', )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> int:
_A = ReformerTokenizer(UpperCamelCase__, keep_accents=UpperCamelCase__ )
_A = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCamelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ), [2_85, 46, 10, 1_70, 3_82], )
_A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCamelCase__, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
_A = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__, [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
_A = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
@cached_property
def __UpperCAmelCase ( self : Any ) -> str:
return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
_A = 'Hello World!'
_A = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
self.assertListEqual(UpperCamelCase__, self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_A = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
_A = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
self.assertListEqual(UpperCamelCase__, self.big_tokenizer.encode(UpperCamelCase__ ) )
@require_torch
@slow
def __UpperCAmelCase ( self : str ) -> Dict:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_A = list(self.big_tokenizer.get_vocab().keys() )[:10]
_A = ' '.join(UpperCamelCase__ )
_A = self.big_tokenizer.encode_plus(UpperCamelCase__, return_tensors='pt' )
_A = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='pt' )
_A = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_A = encoded_sequence['input_ids'].shape
_A = ReformerModel(UpperCamelCase__ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCamelCase__ )
model(**UpperCamelCase__ )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Any:
# fmt: off
_A = {'input_ids': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_A = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__, model_name='google/reformer-crime-and-punishment', revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a', padding=UpperCamelCase__, sequences=UpperCamelCase__, )
| 107 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( a__ ):
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) )
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any:
UpperCamelCase : int = parent
UpperCamelCase : int = batch_size
UpperCamelCase : List[Any] = image_size
UpperCamelCase : List[str] = patch_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 )
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : Optional[int] = conv_kernel_size
UpperCamelCase : List[str] = output_stride
UpperCamelCase : Union[str, Any] = classifier_dropout_prob
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Any = is_training
UpperCamelCase : int = num_labels
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Tuple = scope
UpperCamelCase : List[str] = width_multiplier
UpperCamelCase : Any = ffn_dropout
UpperCamelCase : List[Any] = attn_dropout
def snake_case_ ( self ) -> int:
UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[str] = None
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ) -> int:
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Optional[int] = self.num_labels
UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = MobileViTVaModelTester(self )
UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case_ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Tuple = outputs.hidden_states
UpperCamelCase : Dict = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase : Any = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Tuple:
UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> str:
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
], device=SCREAMING_SNAKE_CASE_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = outputs.logits.detach().cpu()
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] )
UpperCamelCase : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
import qiskit
def _SCREAMING_SNAKE_CASE ( __snake_case = 2 ) -> qiskit.result.counts.Counts:
_UpperCAmelCase = qubits
# Using Aer's simulator
_UpperCAmelCase = qiskit.Aer.get_backend("""aer_simulator""" )
# Creating a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(__snake_case , __snake_case )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , __snake_case ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , __snake_case )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__snake_case ) ) , list(range(__snake_case ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
_UpperCAmelCase = qiskit.execute(__snake_case , __snake_case , shots=1_0_0_0 )
return job.result().get_counts(__snake_case )
if __name__ == "__main__":
print(F"Total count for various states are: {quantum_entanglement(3)}")
| 108 |
def UpperCamelCase ( snake_case__ : Optional[int] ) -> str:
UpperCamelCase : List[str] = [0] * len(snake_case__ )
UpperCamelCase : int = []
UpperCamelCase : Optional[int] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCamelCase : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 40 | 0 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Optional[Any] = BertJapaneseTokenizer
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : str = True
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """こんにちは、世界。 \nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_input_output_texts(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCamelCase ,clean_up_tokenization_spaces=lowerCamelCase )
return text, ids
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(lowerCamelCase ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""mecab""" )
self.assertIsNotNone(lowerCamelCase )
__SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(lowerCamelCase ,"""wb""" ) as handle:
pickle.dump(lowerCamelCase ,lowerCamelCase )
with open(lowerCamelCase ,"""rb""" ) as handle:
__SCREAMING_SNAKE_CASE = pickle.load(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
try:
__SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
try:
__SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MecabTokenizer(do_lower_case=lowerCamelCase ,mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
try:
__SCREAMING_SNAKE_CASE = MecabTokenizer(
do_lower_case=lowerCamelCase ,normalize_text=lowerCamelCase ,mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MecabTokenizer(normalize_text=lowerCamelCase ,mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] ,)
@require_sudachi
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(lowerCamelCase )
__SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(lowerCamelCase ,"""wb""" ) as handle:
pickle.dump(lowerCamelCase ,lowerCamelCase )
with open(lowerCamelCase ,"""rb""" ) as handle:
__SCREAMING_SNAKE_CASE = pickle.load(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
@require_sudachi
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] ,)
@require_sudachi
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国人""", """参政権"""] )
@require_sudachi
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国人参政権"""] )
@require_sudachi
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SudachiTokenizer(do_lower_case=lowerCamelCase ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] ,)
@require_sudachi
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SudachiTokenizer(normalize_text=lowerCamelCase ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] ,)
@require_sudachi
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SudachiTokenizer(trim_whitespace=lowerCamelCase ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
@require_jumanpp
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(lowerCamelCase )
__SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(lowerCamelCase ,"""wb""" ) as handle:
pickle.dump(lowerCamelCase ,lowerCamelCase )
with open(lowerCamelCase ,"""rb""" ) as handle:
__SCREAMING_SNAKE_CASE = pickle.load(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
@require_jumanpp
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = JumanppTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = JumanppTokenizer(normalize_text=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = JumanppTokenizer(trim_whitespace=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] ,)
@require_jumanpp
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) ,["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] ,)
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
__SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=lowerCamelCase ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) ,["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) ,["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) ,["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
__SCREAMING_SNAKE_CASE = tokenizer.subword_tokenizer
__SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(lowerCamelCase ,["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
__SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(lowerCamelCase ,["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""ありがとう。""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""どういたしまして。""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase ,lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Dict = BertJapaneseTokenizer
__UpperCamelCase : Optional[int] = False
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCAmelCase__ ( self : Optional[int] ,**lowerCamelCase : int ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname ,subword_tokenizer_type="""character""" ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """こんにちは、世界。 \nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ,subword_tokenizer_type="""character""" )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
lowerCamelCase ,["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = CharacterTokenizer(vocab=lowerCamelCase ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) ,["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) ,["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""ありがとう。""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""どういたしまして。""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase ,lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cl-tohoku/bert-base-japanese"""
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" ,level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
__SCREAMING_SNAKE_CASE = """bert-base-cased"""
with self.assertLogs("""transformers""" ,level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 109 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
UpperCamelCase__ = {
'junnyu/roformer_chinese_small': 15_36,
'junnyu/roformer_chinese_base': 15_36,
'junnyu/roformer_chinese_char_small': 5_12,
'junnyu/roformer_chinese_char_base': 5_12,
'junnyu/roformer_small_discriminator': 1_28,
'junnyu/roformer_small_generator': 1_28,
}
UpperCamelCase__ = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class a ( lowercase ):
UpperCamelCase : int = VOCAB_FILES_NAMES
UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : Optional[int] = RoFormerTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCAmelCase__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('lowercase' , UpperCamelCase_ ) != do_lower_case
or pre_tok_state.get('strip_accents' , UpperCamelCase_ ) != strip_accents
):
UpperCAmelCase__ : Any = getattr(UpperCamelCase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase__ : str = do_lower_case
UpperCAmelCase__ : Union[str, Any] = strip_accents
UpperCAmelCase__ : Dict = pre_tok_class(**UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = do_lower_case
def __getstate__( self ):
UpperCAmelCase__ : int = self.__dict__.copy()
UpperCAmelCase__ : int = BertPreTokenizer()
return state
def __setstate__( self , UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = d
UpperCAmelCase__ : List[str] = self.__dict__['_tokenizer'].get_vocab()
UpperCAmelCase__ : List[Any] = PreTokenizer.custom(JiebaPreTokenizer(UpperCamelCase_ ) )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=None ):
UpperCAmelCase__ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
UpperCAmelCase__ : int = [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
UpperCAmelCase__ : Any = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , **UpperCamelCase_ , ):
UpperCAmelCase__ : int = BertPreTokenizer()
return super().save_pretrained(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
| 110 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
'''simple docstring'''
from timeit import timeit
__a = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def __snake_case( _lowerCAmelCase ) -> bool:
snake_case__ : str = 0
snake_case__ : Optional[int] = len(snake_case__ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def __snake_case( _lowerCAmelCase ) -> bool:
snake_case__ : List[str] = len(snake_case__ ) // 2
snake_case__ : Optional[int] = len(snake_case__ )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(snake_case__ ) )
def __snake_case( _lowerCAmelCase ) -> bool:
if len(snake_case__ ) <= 2:
return True
if s[0] == s[len(snake_case__ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def __snake_case( _lowerCAmelCase ) -> bool:
return s == s[::-1]
def __snake_case( _lowerCAmelCase ) -> None:
snake_case__ : List[Any] = f"all({name}(key) is value for key, value in test_data.items())"
snake_case__ : Optional[Any] = f"from __main__ import test_data, {name}"
snake_case__ : Dict = 500_000
snake_case__ : Optional[int] = timeit(stmt=snake_case__ , setup=snake_case__ , number=snake_case__ )
print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 374 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
import math
import os
import sys
def A ( __UpperCamelCase ) -> str:
A__ = ''
try:
with open(snake_case__ , 'rb' ) as binary_file:
A__ = binary_file.read()
for dat in data:
A__ = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None:
lexicon.pop(snake_case__ )
A__ = last_match_id
if math.loga(snake_case__ ).is_integer():
for curr_key in lexicon:
A__ = '0' + lexicon[curr_key]
A__ = bin(snake_case__ )[2:]
def A ( __UpperCamelCase ) -> str:
A__ = {'0': '0', '1': '1'}
A__ = '', ''
A__ = len(snake_case__ )
for i in range(len(snake_case__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A__ = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
index += 1
A__ = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
A__ = lexicon[curr_string]
result += last_match_id
return result
def A ( __UpperCamelCase , __UpperCamelCase ) -> str:
A__ = os.path.getsize(snake_case__ )
A__ = bin(snake_case__ )[2:]
A__ = len(snake_case__ )
return "0" * (length_length - 1) + file_length_binary + compressed
def A ( __UpperCamelCase , __UpperCamelCase ) -> None:
A__ = 8
try:
with open(snake_case__ , 'wb' ) as opened_file:
A__ = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case__ ) , snake_case__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def A ( __UpperCamelCase , __UpperCamelCase ) -> None:
A__ = read_file_binary(snake_case__ )
A__ = compress_data(snake_case__ )
A__ = add_file_length(snake_case__ , snake_case__ )
write_file_binary(snake_case__ , snake_case__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 9 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ViTFeatureExtractor''']
__UpperCAmelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
'''simple docstring'''
from math import isqrt
def lowerCamelCase__ ( a ):
__snake_case = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , snake_case__ , snake_case__ ):
__snake_case = False
return [i for i in range(2 , snake_case__ ) if is_prime[i]]
def lowerCamelCase__ ( a = 10**8 ):
__snake_case = calculate_prime_numbers(max_number // 2 )
__snake_case = 0
__snake_case = 0
__snake_case = len(snake_case__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 356 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
| 40 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self: int , a: str , a: Dict=13 , a: List[str]=30 , a: Optional[int]=2 , a: Any=3 , a: int=True , a: Any=True , a: List[Any]=32 , a: List[str]=5 , a: Optional[int]=4 , a: Dict=37 , a: Optional[Any]="gelu" , a: Dict=0.1 , a: Dict=0.1 , a: int=10 , a: Tuple=0.02 , a: Tuple=3 , a: Dict=None , a: int=2 , ) ->Any:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = is_training
a_ = use_labels
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = type_sequence_label_size
a_ = initializer_range
a_ = scope
a_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
a_ = (image_size // patch_size) ** 2
a_ = num_patches + 2
def _lowerCAmelCase ( self: List[Any]) ->Tuple:
'''simple docstring'''
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self: Optional[int]) ->Optional[int]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCAmelCase ( self: str , a: List[str] , a: Optional[int] , a: List[str]) ->Union[str, Any]:
'''simple docstring'''
a_ = DeiTModel(config=SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
a_ = model(SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Tuple , a: str , a: Dict , a: List[str]) ->Tuple:
'''simple docstring'''
a_ = DeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
a_ = model(SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a_ = 1
a_ = DeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
a_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a_ = model(SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _lowerCAmelCase ( self: Any , a: Optional[Any] , a: Any , a: Union[str, Any]) ->List[Any]:
'''simple docstring'''
a_ = self.type_sequence_label_size
a_ = DeiTForImageClassification(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
a_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a_ = 1
a_ = DeiTForImageClassification(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
a_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: List[str]) ->Dict:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(
a_
) = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase =(
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: Dict) ->Optional[int]:
'''simple docstring'''
a_ = DeiTModelTester(self)
a_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37)
def _lowerCAmelCase ( self: List[Any]) ->Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds")
def _lowerCAmelCase ( self: str) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: List[Any]) ->List[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear))
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(SCREAMING_SNAKE_CASE_)
a_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_)
def _lowerCAmelCase ( self: Tuple) ->str:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_)
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_)
def _lowerCAmelCase ( self: str , a: Any , a: str , a: List[Any]=False) ->Optional[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowerCAmelCase ( self: List[str]) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE_)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
a_ = model_class(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.train()
a_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_)
a_ = model(**SCREAMING_SNAKE_CASE_).loss
loss.backward()
def _lowerCAmelCase ( self: List[str]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a_ = False
a_ = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE_) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
a_ = model_class(SCREAMING_SNAKE_CASE_)
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE_)
model.train()
a_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_)
a_ = model(**SCREAMING_SNAKE_CASE_).loss
loss.backward()
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE_),
*get_values(SCREAMING_SNAKE_CASE_),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}"""):
a_ = problem_type['title']
a_ = problem_type['num_labels']
a_ = model_class(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.train()
a_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_)
if problem_type["num_labels"] > 1:
a_ = inputs['labels'].unsqueeze(1).repeat(1 , problem_type["num_labels"])
a_ = inputs['labels'].to(problem_type["dtype"])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_) as warning_list:
a_ = model(**SCREAMING_SNAKE_CASE_).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""")
loss.backward()
@slow
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = DeiTModel.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
def __UpperCAmelCase () -> List[str]:
'''simple docstring'''
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase ( self: Dict) ->List[str]:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self: List[str]) ->Union[str, Any]:
'''simple docstring'''
a_ = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
SCREAMING_SNAKE_CASE_)
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt").to(SCREAMING_SNAKE_CASE_)
# forward pass
with torch.no_grad():
a_ = model(**SCREAMING_SNAKE_CASE_)
# verify the logits
a_ = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_)
a_ = torch.tensor([-1.0266, 0.1912, -1.2861]).to(SCREAMING_SNAKE_CASE_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def _lowerCAmelCase ( self: Any) ->str:
'''simple docstring'''
a_ = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto")
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt")
a_ = inputs.pixel_values.to(SCREAMING_SNAKE_CASE_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a_ = model(SCREAMING_SNAKE_CASE_)
| 685 |
def UpperCamelCase ( snake_case__ : int ) -> str:
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , A_ , A_=13 , A_=32 , A_=3 , A_=4 , A_=[10, 20, 30, 40] , A_=[2, 2, 3, 2] , A_=True , A_=True , A_=37 , A_="gelu" , A_=10 , A_=0.02 , A_=["stage2", "stage3", "stage4"] , A_=3 , A_=None , ):
'''simple docstring'''
UpperCamelCase : Optional[int] = parent
UpperCamelCase : Optional[Any] = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : int = num_stages
UpperCamelCase : Any = hidden_sizes
UpperCamelCase : Tuple = depths
UpperCamelCase : Tuple = is_training
UpperCamelCase : Optional[Any] = use_labels
UpperCamelCase : Optional[int] = intermediate_size
UpperCamelCase : Any = hidden_act
UpperCamelCase : Dict = type_sequence_label_size
UpperCamelCase : int = initializer_range
UpperCamelCase : List[str] = out_features
UpperCamelCase : str = num_labels
UpperCamelCase : Optional[int] = scope
UpperCamelCase : Tuple = num_stages
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : str = None
if self.use_labels:
UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __UpperCamelCase( self ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=SCREAMING_SNAKE_CASE_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=SCREAMING_SNAKE_CASE_ , loss_ignore_index=255 , num_labels=self.num_labels , )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = UperNetForSemanticSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
(
UpperCamelCase
) : str = config_and_inputs
UpperCamelCase : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase :List[str] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_UpperCAmelCase :Union[str, Any] = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
_UpperCAmelCase :Any = False
_UpperCAmelCase :str = False
_UpperCAmelCase :Optional[int] = False
_UpperCAmelCase :Any = False
_UpperCAmelCase :str = False
_UpperCAmelCase :Tuple = False
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = UperNetModelTester(self )
UpperCamelCase : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase( self ):
'''simple docstring'''
return
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : int = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[Any] = [*signature.parameters.keys()]
UpperCamelCase : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : str = _config_zero_init(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
UpperCamelCase : Dict = model_class(config=SCREAMING_SNAKE_CASE_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="UperNet does not have tied weights" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = UperNetForSemanticSegmentation.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def A_ ( ) -> Optional[Any]:
UpperCamelCase : Dict = hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
UpperCamelCase : List[Any] = Image.open(snake_case__ ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
UpperCamelCase : int = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
UpperCamelCase : Dict = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = prepare_img()
UpperCamelCase : List[Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 629 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
UpperCamelCase : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Dict = new_module
UpperCamelCase : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCamelCase : Union[str, Any] = tensor_name in module._buffers
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
else:
UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[Any] = value.to('cpu' )
if value.dtype == torch.inta:
UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
UpperCamelCase : Union[str, Any] = new_value.T
UpperCamelCase : Union[str, Any] = old_value.__dict__
if is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
elif is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
UpperCamelCase : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[str] = value.to(snake_case__ )
else:
UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
UpperCamelCase : Optional[int] = new_value
else:
UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
UpperCamelCase : List[str] = new_value
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int:
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase : str = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape
else:
UpperCamelCase : Any = module.in_features
UpperCamelCase : List[str] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase : Any = bnb.nn.LinearabitLt(
snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
UpperCamelCase : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase : str = bnb.nn.Linearabit(
snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
UpperCamelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase : Any = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase : List[str] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] )
UpperCamelCase : Optional[int] = len(snake_case__ ) > 0
# Check if it is a base model
UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : List[Any] = list(model.named_children() )
UpperCamelCase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ )
UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
UpperCamelCase : Tuple = ['.weight', '.bias']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
| 40 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A : Optional[Any] = logging.get_logger(__name__)
A : List[Any] = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase ( a__ , a__ ):
'''simple docstring'''
A = "dinat"
A = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :str , lowerCamelCase_ :List[Any]=4 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Optional[int]=6_4 , lowerCamelCase_ :str=[3, 4, 6, 5] , lowerCamelCase_ :Tuple=[2, 4, 8, 1_6] , lowerCamelCase_ :List[str]=7 , lowerCamelCase_ :int=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase_ :int=3.0 , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :List[str]="gelu" , lowerCamelCase_ :Any=0.02 , lowerCamelCase_ :Optional[int]=1e-5 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Dict=None , lowerCamelCase_ :Optional[Any]=None , **lowerCamelCase_ :str , ) -> int:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = num_heads
UpperCamelCase__ = kernel_size
UpperCamelCase__ = dilations
UpperCamelCase__ = mlp_ratio
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = hidden_act
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) )
UpperCamelCase__ = layer_scale_init_value
UpperCamelCase__ = ['stem'] + [f'stage{idx}' for idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 )]
UpperCamelCase__ = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
| 516 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( snake_case__ : int ) -> Dict:
UpperCamelCase : Optional[Any] = tmp_path / 'file.csv'
UpperCamelCase : Optional[Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]:
UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv'
UpperCamelCase : Any = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str:
UpperCamelCase : Any = tmp_path / 'csv_with_image.csv'
UpperCamelCase : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv'
UpperCamelCase : Dict = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv'
UpperCamelCase : Union[str, Any] = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]:
UpperCamelCase : str = Csv()
UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case__ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : List[str] = f.read().splitlines()[1]
UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
UpperCamelCase : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( snake_case__ : Any ) -> str:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : Any = f.read().splitlines()[1:]
UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
UpperCamelCase : List[str] = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(snake_case__ ) for label in labels]
def UpperCamelCase ( snake_case__ : str ) -> List[Any]:
UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda snake_case__ : [int(snake_case__ ) for i in x.split()]} )
UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
UpperCamelCase : str = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 40 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 369 |
import math
import random
def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
__UpperCAmelCase = 0.02
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float:
UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCamelCase : int = (expected / 100) - layer_a
# Error delta
UpperCamelCase : List[str] = layer_1_error * sigmoid_function(snake_case__ , snake_case__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input('''Expected value: '''))
__UpperCAmelCase = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 40 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase : Optional[Any] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
lowerCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowerCamelCase = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : str , __snake_case : str=None ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowerCamelCase = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowerCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCamelCase = black.format_str(SCREAMING_SNAKE_CASE_ , mode=SCREAMING_SNAKE_CASE_ )
lowerCamelCase = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(SCREAMING_SNAKE_CASE_ , 'w' , newline='\n' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , 'r' ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
lowerCamelCase = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , SCREAMING_SNAKE_CASE_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , SCREAMING_SNAKE_CASE_ ) , )
# Copy consistency with a really long name
lowerCamelCase = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub('Bert' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , SCREAMING_SNAKE_CASE_ , overwrite_result=re.sub('DDPM' , 'Test' , SCREAMING_SNAKE_CASE_ ) , )
| 246 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[int]:
return EnvironmentCommand()
class lowerCAmelCase_ ( a__ ):
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : List[Any] = parser.add_parser('env' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = huggingface_hub.__version__
UpperCamelCase : int = 'not installed'
UpperCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
UpperCamelCase : Any = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : Dict = 'not installed'
if is_transformers_available():
import transformers
UpperCamelCase : str = transformers.__version__
UpperCamelCase : Optional[Any] = 'not installed'
if is_accelerate_available():
import accelerate
UpperCamelCase : Dict = accelerate.__version__
UpperCamelCase : List[str] = 'not installed'
if is_xformers_available():
import xformers
UpperCamelCase : List[str] = xformers.__version__
UpperCamelCase : Dict = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(SCREAMING_SNAKE_CASE_ ) )
return info
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 40 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A_ ( unittest.TestCase ):
_A :str = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Tuple ):
lowercase = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase = VideoClassificationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , top_k=2 )
lowercase = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
for example in examples:
lowercase = video_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """label""": ANY(SCREAMING_SNAKE_CASE_ )},
{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """label""": ANY(SCREAMING_SNAKE_CASE_ )},
] , )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowercase = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
lowercase = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
lowercase = pipeline(
"""video-classification""" , model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , frame_sampling_rate=4 )
lowercase = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase = video_classifier(SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
lowercase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
pass
| 428 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__UpperCAmelCase = {
'''facebook/xglm-564M''': 2_048,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase : Any = 7
UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase : Optional[int] = len(self.sp_model )
UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
UpperCamelCase : int = self.__dict__.copy()
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
UpperCamelCase : Any = {}
UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCamelCase : Optional[int] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case_ ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip()
return out_string
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi:
UpperCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 40 | 0 |
'''simple docstring'''
class __snake_case :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : List[str] ) -> List[str]:
lowerCAmelCase_ : List[Any] = name
lowerCAmelCase_ : Tuple = val
def __str__( self : str ) -> List[str]:
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self : int , lowerCamelCase : Union[str, Any] ) -> int:
return self.val < other.val
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase : List[Any] ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = {}
lowerCAmelCase_ : Dict = {}
lowerCAmelCase_ : Optional[int] = self.build_heap(SCREAMING_SNAKE_CASE_ )
def __getitem__( self : Union[str, Any] , lowerCamelCase : Optional[Any] ) -> List[str]:
return self.get_value(SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : Tuple , lowerCamelCase : List[Any] ) -> List[str]:
return (idx - 1) // 2
def __lowercase ( self : List[Any] , lowerCamelCase : int ) -> Optional[int]:
return idx * 2 + 1
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Any ) -> Optional[Any]:
return idx * 2 + 2
def __lowercase ( self : int , lowerCamelCase : Any ) -> int:
return self.heap_dict[key]
def __lowercase ( self : int , lowerCamelCase : Union[str, Any] ) -> Any:
lowerCAmelCase_ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase_ : Optional[Any] = self.get_parent_idx(SCREAMING_SNAKE_CASE_ )
for idx, i in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : List[Any] = idx
lowerCAmelCase_ : Dict = i.val
for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ):
self.sift_down(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return array
def __lowercase ( self : Dict , lowerCamelCase : int , lowerCamelCase : Optional[Any] ) -> Dict:
while True:
lowerCAmelCase_ : Optional[int] = self.get_left_child_idx(SCREAMING_SNAKE_CASE_ ) # noqa: E741
lowerCAmelCase_ : Union[str, Any] = self.get_right_child_idx(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = idx
if l < len(SCREAMING_SNAKE_CASE_ ) and array[l] < array[idx]:
lowerCAmelCase_ : Tuple = l
if r < len(SCREAMING_SNAKE_CASE_ ) and array[r] < array[smallest]:
lowerCAmelCase_ : Union[str, Any] = r
if smallest != idx:
lowerCAmelCase_ : Any = array[smallest], array[idx]
(
lowerCAmelCase_
) : str = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
lowerCAmelCase_ : Union[str, Any] = smallest
else:
break
def __lowercase ( self : Optional[int] , lowerCamelCase : Any ) -> str:
lowerCAmelCase_ : Union[str, Any] = self.get_parent_idx(SCREAMING_SNAKE_CASE_ )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowerCAmelCase_ : int = self.heap[idx], self.heap[p]
lowerCAmelCase_ : Optional[int] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowerCAmelCase_ : int = p
lowerCAmelCase_ : Optional[Any] = self.get_parent_idx(SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : Optional[int] ) -> Optional[Any]:
return self.heap[0]
def __lowercase ( self : List[str] ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = self.heap[-1], self.heap[0]
lowerCAmelCase_ : Union[str, Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowerCAmelCase_ : Union[str, Any] = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def __lowercase ( self : Dict , lowerCamelCase : int ) -> Dict:
self.heap.append(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = len(self.heap ) - 1
lowerCAmelCase_ : Dict = node.val
self.sift_up(len(self.heap ) - 1 )
def __lowercase ( self : Dict ) -> Union[str, Any]:
return len(self.heap ) == 0
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : int ) -> Tuple:
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowerCAmelCase_ : str = new_value
lowerCAmelCase_ : Tuple = new_value
self.sift_up(self.idx_of_element[node] )
__A : List[Any] = Node("R", -1)
__A : Tuple = Node("B", 6)
__A : Optional[int] = Node("A", 3)
__A : str = Node("X", 1)
__A : Union[str, Any] = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__A : List[str] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Dict = RobertaTokenizer
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) )
UpperCamelCase : List[str] = add_prefix_space
UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = add_prefix_space
UpperCamelCase : Optional[Any] = 'post_processor'
UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase : Optional[Any] = tuple(state['sep'] )
if "cls" in state:
UpperCamelCase : Optional[int] = tuple(state['cls'] )
UpperCamelCase : Any = False
if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Optional[int] = add_prefix_space
UpperCamelCase : List[Any] = True
if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCamelCase : Dict = trim_offsets
UpperCamelCase : Union[str, Any] = True
if changes_to_apply:
UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) )
UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value
UpperCamelCase : List[Any] = value
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple:
UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 40 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
a_ : Dict = logging.getLogger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : int=16 , _UpperCamelCase : int = 10 , _UpperCamelCase : int = 2 ) -> Any:
'''simple docstring'''
def get_dataset(_UpperCamelCase : Dict ):
SCREAMING_SNAKE_CASE = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(snake_case__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
SCREAMING_SNAKE_CASE = get_dataset(snake_case__ )
SCREAMING_SNAKE_CASE = get_dataset(snake_case__ )
SCREAMING_SNAKE_CASE = DataLoader(snake_case__ , shuffle=snake_case__ , batch_size=snake_case__ , num_workers=4 )
SCREAMING_SNAKE_CASE = DataLoader(snake_case__ , shuffle=snake_case__ , batch_size=snake_case__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for epoch in range(snake_case__ ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE = batch
SCREAMING_SNAKE_CASE = model(snake_case__ )
SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(snake_case__ , snake_case__ )
accelerator.backward(snake_case__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class UpperCamelCase ( nn.Module ):
def __init__( self : str ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) )
def UpperCamelCase ( self : List[str] , snake_case__ : Dict ):
"""simple docstring"""
return x * self.a + self.b
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCamelCase ( self : int ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , 'initial' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , 'checkpoint' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( self : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , 'checkpoints' , 'checkpoint_0' ) )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
a_ : Tuple = "/tmp/accelerate/state_checkpointing"
a_ : int = DummyModel()
a_ : Any = torch.optim.Adam(params=model.parameters(), lr=1e-3)
a_ : Optional[int] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
a_ , a_ : Any = dummy_dataloaders()
a_ : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
a_ : Dict = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
a_ , a_ , a_ , a_ , a_ : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
a_ , a_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
a_ : str = group["params"][0].device
break
assert param_device.type == accelerator.device.type
a_ : int = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
a_ : Optional[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
a_ : str = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 439 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCamelCase : str = {}
if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
UpperCamelCase : List[str] = {'dtype': torch.intaa}
elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
UpperCamelCase : int = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ):
UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
UpperCamelCase : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
| 40 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__a = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
__a = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
__a = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n"
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
return float((preds == labels).mean() )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str:
snake_case__ : str = simple_accuracy(snake_case__ , snake_case__ )
snake_case__ : List[Any] = float(fa_score(y_true=snake_case__ , y_pred=snake_case__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
snake_case__ : List[Any] = float(pearsonr(snake_case__ , snake_case__ )[0] )
snake_case__ : Optional[Any] = float(spearmanr(snake_case__ , snake_case__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase ( self : List[Any] ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def lowerCamelCase ( self : int , snake_case_ : Optional[int] , snake_case_ : int ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 374 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
UpperCamelCase : int = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(snake_case__ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCamelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(snake_case__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCamelCase : Dict = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(snake_case__ )
UpperCamelCase : List[Any] = []
for value in value_array:
UpperCamelCase : Optional[Any] = euclidean(snake_case__ , dataset[0] )
UpperCamelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCamelCase : Union[str, Any] = euclidean(snake_case__ , snake_case__ )
if dist > temp_dist:
UpperCamelCase : str = temp_dist
UpperCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
def A ( __UpperCamelCase , __UpperCamelCase ) -> list[int]:
A__ = int(snake_case__ )
# Initialize Result
A__ = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
answer.append(snake_case__ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE__ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(f'Denomination {i}: ').strip()))
SCREAMING_SNAKE_CASE__ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
SCREAMING_SNAKE_CASE__ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(f'Following is minimal change for {value}: ')
SCREAMING_SNAKE_CASE__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 9 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__UpperCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__UpperCAmelCase = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__UpperCAmelCase = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__UpperCAmelCase = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__UpperCAmelCase = np.expand_dims(test_image, axis=0)
__UpperCAmelCase = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__UpperCAmelCase = '''Normal'''
if result[0][0] == 1:
__UpperCAmelCase = '''Abnormality detected'''
| 40 | 0 |
'''simple docstring'''
_lowercase = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def lowerCamelCase__ ( a ):
assert type(snake_case__ ) in (int, float) and decimal == int(snake_case__ )
__snake_case = int(snake_case__ )
__snake_case = ''
__snake_case = False
if decimal < 0:
__snake_case = True
decimal *= -1
while decimal > 0:
__snake_case = divmod(snake_case__ , 16 )
__snake_case = values[remainder] + hexadecimal
__snake_case = '0x' + hexadecimal
if negative:
__snake_case = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = '''us-east-1''' # defaults region
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : str
UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
UpperCAmelCase__ : Union[str, Any] = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000}
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ) -> str:
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def UpperCamelCase ( snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 40 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 685 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__UpperCAmelCase = '''src/transformers'''
__UpperCAmelCase = '''docs/source/en/tasks'''
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
UpperCamelCase : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
UpperCamelCase : Optional[Any] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
__UpperCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__UpperCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide]
UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
UpperCamelCase : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 | 0 |
__lowerCamelCase : Optional[Any] = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 629 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : int = IFPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ) -> str:
return self._get_dummy_components()
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def snake_case_ ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case_ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_local()
def snake_case_ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def snake_case_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
# if
UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa )
UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase : int = None
UpperCamelCase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCamelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 40 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Optional[int] = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 516 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase ( snake_case__ : Tuple="" ) -> str:
UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) )
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' )
sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 )
UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) )
UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = 'Hey!'
UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_SCREAMING_SNAKE_CASE = Lock()
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Optional[int]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] ) -> List[str]:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __lowerCamelCase ( ) -> Optional[Any]:
snake_case = list(range(10 , 0 , -1 ) )
print("""Initial List""" )
print(*snake_case__ )
snake_case = odd_even_transposition(snake_case__ )
print("""Sorted List\n""" )
print(*snake_case__ )
if __name__ == "__main__":
main()
| 369 |
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : int = [1]
for i in range(2 , snake_case__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCamelCase : List[Any] = []
UpperCamelCase : List[Any] = list(range(snake_case__ ) )
# Find permutation
while factorials:
UpperCamelCase : int = factorials.pop()
UpperCamelCase , UpperCamelCase : int = divmod(snake_case__ , snake_case__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
import requests
_lowerCAmelCase : Optional[int] = 'YOUR API KEY'
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : str = giphy_api_key ) -> list:
"""simple docstring"""
lowerCamelCase = '+'.join(query.split() )
lowerCamelCase = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
lowerCamelCase = requests.get(snake_case__ ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 246 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( a__ ):
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) )
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any:
UpperCamelCase : int = parent
UpperCamelCase : int = batch_size
UpperCamelCase : List[Any] = image_size
UpperCamelCase : List[str] = patch_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 )
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : Optional[int] = conv_kernel_size
UpperCamelCase : List[str] = output_stride
UpperCamelCase : Union[str, Any] = classifier_dropout_prob
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Any = is_training
UpperCamelCase : int = num_labels
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Tuple = scope
UpperCamelCase : List[str] = width_multiplier
UpperCamelCase : Any = ffn_dropout
UpperCamelCase : List[Any] = attn_dropout
def snake_case_ ( self ) -> int:
UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[str] = None
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ) -> int:
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Optional[int] = self.num_labels
UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = MobileViTVaModelTester(self )
UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case_ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Tuple = outputs.hidden_states
UpperCamelCase : Dict = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase : Any = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Tuple:
UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> str:
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
], device=SCREAMING_SNAKE_CASE_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = outputs.logits.detach().cpu()
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] )
UpperCamelCase : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__SCREAMING_SNAKE_CASE : Any ={
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict =[
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 428 |
def UpperCamelCase ( snake_case__ : Optional[int] ) -> str:
UpperCamelCase : List[str] = [0] * len(snake_case__ )
UpperCamelCase : int = []
UpperCamelCase : Optional[int] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCamelCase : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 40 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 275 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 0 |
import argparse
import os
import re
import packaging.version
a_ : int = "examples/"
a_ : List[Any] = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
a_ : Tuple = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
a_ : List[Any] = "README.md"
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple ) -> Dict:
'''simple docstring'''
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
SCREAMING_SNAKE_CASE = f.read()
SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE = replace.replace('VERSION' , snake_case__ )
SCREAMING_SNAKE_CASE = re_pattern.sub(snake_case__ , snake_case__ )
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(snake_case__ )
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> int:
'''simple docstring'''
for folder, directories, fnames in os.walk(snake_case__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(snake_case__ , snake_case__ ) , snake_case__ , pattern='examples' )
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : int=False ) -> Dict:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(snake_case__ , snake_case__ , snake_case__ )
if not patch:
update_version_in_examples(snake_case__ )
def __lowerCAmelCase ( ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = '🤗 Transformers currently provides the following architectures'
SCREAMING_SNAKE_CASE = '1. Want to contribute a new model?'
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
SCREAMING_SNAKE_CASE = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
SCREAMING_SNAKE_CASE = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(snake_case__ )
def __lowerCAmelCase ( ) -> Dict:
'''simple docstring'''
with open(REPLACE_FILES['init'] , 'r' ) as f:
SCREAMING_SNAKE_CASE = f.read()
SCREAMING_SNAKE_CASE = REPLACE_PATTERNS['init'][0].search(snake_case__ ).groups()[0]
return packaging.version.parse(snake_case__ )
def __lowerCAmelCase ( _UpperCamelCase : Any=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
SCREAMING_SNAKE_CASE = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE = input(f"""Which version are you releasing? [{default_version}]""" )
if len(snake_case__ ) == 0:
SCREAMING_SNAKE_CASE = default_version
print(f"""Updating version to {version}.""" )
global_version_update(snake_case__ , patch=snake_case__ )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def __lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_version()
SCREAMING_SNAKE_CASE = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
SCREAMING_SNAKE_CASE = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(snake_case__ ) == 0:
SCREAMING_SNAKE_CASE = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(snake_case__ )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
a_ : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 439 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
__a = True
from torch.cuda.amp import autocast
__a = logging.getLogger(__name__)
def __snake_case( _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Union[str, Any]:
return field(default_factory=lambda: default , metadata=snake_case__ )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowercase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowercase = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowercase = field(
default=a__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowercase = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
lowercase = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
lowercase = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
lowercase = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
lowercase = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
lowercase = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowercase = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowercase = field(
default=a__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowercase = field(
default=a__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowercase = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
lowercase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowercase = 42
lowercase = True
lowercase = None
lowercase = None
lowercase = None
lowercase = None
def __call__( self : Tuple , snake_case_ : Tuple ):
# split inputs and labels since they have to be of different lenghts and need
# different padding methods
snake_case__ : Any = [{'input_values': feature['input_values']} for feature in features]
snake_case__ : int = [{'input_ids': feature['labels']} for feature in features]
snake_case__ : Dict = self.processor.pad(
SCREAMING_SNAKE_CASE_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
snake_case__ : Optional[int] = self.processor.pad(
labels=SCREAMING_SNAKE_CASE_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , )
# replace padding with -100 to ignore loss correctly
snake_case__ : Optional[int] = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
snake_case__ : Dict = labels
return batch
class UpperCAmelCase_ ( a__ ):
"""simple docstring"""
def lowerCamelCase ( self : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[Any] ):
model.train()
snake_case__ : Optional[Any] = self._prepare_inputs(SCREAMING_SNAKE_CASE_ )
if self.use_amp:
with autocast():
snake_case__ : str = self.compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
snake_case__ : Any = self.compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
snake_case__ : Optional[Any] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
snake_case__ : Optional[int] = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
if self.args.gradient_accumulation_steps > 1:
snake_case__ : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(SCREAMING_SNAKE_CASE_ ).backward()
elif self.use_apex:
with amp.scale_loss(SCREAMING_SNAKE_CASE_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(SCREAMING_SNAKE_CASE_ )
else:
loss.backward()
return loss.detach()
def __snake_case( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ : List[str] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
snake_case__ : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , snake_case__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
snake_case__ : Optional[int] = datasets.load_dataset(
"""common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name )
snake_case__ : str = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" )
# Create and save tokenizer
snake_case__ : List[Any] = f"[{''.join(data_args.chars_to_ignore )}]"
def remove_special_characters(_lowerCAmelCase ):
snake_case__ : str = re.sub(snake_case__ , """""" , batch["""sentence"""] ).lower() + ' '
return batch
snake_case__ : List[Any] = train_dataset.map(snake_case__ , remove_columns=["""sentence"""] )
snake_case__ : Dict = eval_dataset.map(snake_case__ , remove_columns=["""sentence"""] )
def extract_all_chars(_lowerCAmelCase ):
snake_case__ : Union[str, Any] = ' '.join(batch["""text"""] )
snake_case__ : List[str] = list(set(snake_case__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
snake_case__ : Any = train_dataset.map(
snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=train_dataset.column_names , )
snake_case__ : List[Any] = train_dataset.map(
snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=eval_dataset.column_names , )
snake_case__ : Tuple = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
snake_case__ : Optional[Any] = {v: k for k, v in enumerate(snake_case__ )}
snake_case__ : Optional[int] = vocab_dict[' ']
del vocab_dict[" "]
snake_case__ : Any = len(snake_case__ )
snake_case__ : Dict = len(snake_case__ )
with open("""vocab.json""" , """w""" ) as vocab_file:
json.dump(snake_case__ , snake_case__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : List[Any] = WavaVecaCTCTokenizer(
"""vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , )
snake_case__ : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ )
snake_case__ : Union[str, Any] = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
snake_case__ : str = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
snake_case__ : Optional[Any] = min(len(snake_case__ ) , data_args.max_train_samples )
snake_case__ : Tuple = train_dataset.select(range(snake_case__ ) )
if data_args.max_val_samples is not None:
snake_case__ : Union[str, Any] = eval_dataset.select(range(data_args.max_val_samples ) )
snake_case__ : int = torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_lowerCAmelCase ):
snake_case__ : Tuple = torchaudio.load(batch["""path"""] )
snake_case__ : Optional[Any] = resampler(snake_case__ ).squeeze().numpy()
snake_case__ : Dict = 16_000
snake_case__ : Optional[int] = batch['text']
return batch
snake_case__ : Optional[int] = train_dataset.map(
snake_case__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
snake_case__ : str = eval_dataset.map(
snake_case__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(_lowerCAmelCase ):
# check that all files have the correct sampling rate
assert (
len(set(batch["""sampling_rate"""] ) ) == 1
), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
snake_case__ : Dict = processor(
audio=batch["""speech"""] , text=batch["""target_text"""] , sampling_rate=batch["""sampling_rate"""][0] )
batch.update(snake_case__ )
return batch
snake_case__ : int = train_dataset.map(
snake_case__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , )
snake_case__ : Dict = eval_dataset.map(
snake_case__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
snake_case__ : List[str] = datasets.load_metric("""wer""" )
def compute_metrics(_lowerCAmelCase ):
snake_case__ : str = pred.predictions
snake_case__ : int = np.argmax(snake_case__ , axis=-1 )
snake_case__ : List[Any] = processor.tokenizer.pad_token_id
snake_case__ : int = processor.batch_decode(snake_case__ )
# we do not want to group tokens when computing the metrics
snake_case__ : Optional[int] = processor.batch_decode(pred.label_ids , group_tokens=snake_case__ )
snake_case__ : Dict = wer_metric.compute(predictions=snake_case__ , references=snake_case__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
snake_case__ : Optional[int] = DataCollatorCTCWithPadding(processor=snake_case__ , padding=snake_case__ )
# Initialize our Trainer
snake_case__ : Tuple = CTCTrainer(
model=snake_case__ , data_collator=snake_case__ , args=snake_case__ , compute_metrics=snake_case__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
snake_case__ : Dict = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
snake_case__ : List[str] = model_args.model_name_or_path
else:
snake_case__ : Union[str, Any] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
snake_case__ : Tuple = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
snake_case__ : Dict = train_result.metrics
snake_case__ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ )
)
snake_case__ : Union[str, Any] = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics("""train""" , snake_case__ )
trainer.save_metrics("""train""" , snake_case__ )
trainer.save_state()
# Evaluation
snake_case__ : List[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
snake_case__ : int = trainer.evaluate()
snake_case__ : Optional[Any] = data_args.max_val_samples if data_args.max_val_samples is not None else len(snake_case__ )
snake_case__ : Optional[Any] = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics("""eval""" , snake_case__ )
trainer.save_metrics("""eval""" , snake_case__ )
return results
if __name__ == "__main__":
main()
| 374 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar('''T''')
class __lowerCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : Tuple , _snake_case : Dict ):
"""simple docstring"""
A__ = data
A__ = self
A__ = 0
class __lowerCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : Tuple ):
"""simple docstring"""
A__ = {}
def _a ( self : Optional[Any] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = DisjointSetTreeNode(SCREAMING_SNAKE_CASE_ )
def _a ( self : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = self.map[data]
if elem_ref != elem_ref.parent:
A__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _a ( self : str , _snake_case : List[Any] , _snake_case : Dict ):
"""simple docstring"""
if nodea.rank > nodea.rank:
A__ = nodea
else:
A__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _a ( self : Union[str, Any] , _snake_case : str , _snake_case : Any ):
"""simple docstring"""
self.link(self.find_set(SCREAMING_SNAKE_CASE_ ) , self.find_set(SCREAMING_SNAKE_CASE_ ) )
class __lowerCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] ):
"""simple docstring"""
A__ = {}
def _a ( self : str , _snake_case : Tuple ):
"""simple docstring"""
if node not in self.connections:
A__ = {}
def _a ( self : Optional[int] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Dict ):
"""simple docstring"""
self.add_node(SCREAMING_SNAKE_CASE_ )
self.add_node(SCREAMING_SNAKE_CASE_ )
A__ = weight
A__ = weight
def _a ( self : List[str] ):
"""simple docstring"""
A__ = []
A__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda _snake_case : x[2] )
# creating the disjoint set
A__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(SCREAMING_SNAKE_CASE_ )
# MST generation
A__ = 0
A__ = 0
A__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
A__ = edges[index]
index += 1
A__ = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ )
A__ = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
disjoint_set.union(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return graph
| 9 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ViTFeatureExtractor''']
__UpperCAmelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a_ :
def __init__( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int]=1_3 , __lowerCAmelCase : str=7 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[Any]=1_9 , __lowerCAmelCase : Union[str, Any]=3_2 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Optional[int]=3_7 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : int=5_1_2 , __lowerCAmelCase : List[str]=1_6 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Union[str, Any]=None , ):
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def lowercase__ ( self : Dict ):
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Union[str, Any] ):
__snake_case = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=SCREAMING_SNAKE_CASE_ , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ):
__snake_case = EsmForProteinFolding(config=SCREAMING_SNAKE_CASE_ ).float()
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
__snake_case = model(SCREAMING_SNAKE_CASE_ )
__snake_case = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowercase__ ( self : int ):
__snake_case = self.prepare_config_and_inputs()
(
__snake_case
) = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
lowercase_ : Tuple = False
lowercase_ : Tuple = (EsmForProteinFolding,) if is_torch_available() else ()
lowercase_ : Tuple = ()
lowercase_ : str = {} if is_torch_available() else {}
lowercase_ : Optional[int] = False
def lowercase__ ( self : str ):
__snake_case = EsmFoldModelTester(self )
__snake_case = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def lowercase__ ( self : str ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Tuple ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('Does not support attention outputs' )
def lowercase__ ( self : Dict ):
pass
@unittest.skip
def lowercase__ ( self : Optional[int] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase__ ( self : Optional[int] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase__ ( self : List[str] ):
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def lowercase__ ( self : List[str] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase__ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase__ ( self : Optional[int] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase__ ( self : Optional[Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase__ ( self : Dict ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase__ ( self : Any ):
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def lowercase__ ( self : List[str] ):
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def lowercase__ ( self : Union[str, Any] ):
pass
@unittest.skip('ESMFold only has one output format.' )
def lowercase__ ( self : Optional[Any] ):
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def lowercase__ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not support input chunking.' )
def lowercase__ ( self : str ):
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def lowercase__ ( self : Dict ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase__ ( self : Optional[Any] ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase__ ( self : Dict ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase__ ( self : Any ):
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def lowercase__ ( self : Optional[int] ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase__ ( self : Tuple ):
pass
@require_torch
class a_ ( a__ ):
@slow
def lowercase__ ( self : List[str] ):
__snake_case = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
__snake_case = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
__snake_case = model(SCREAMING_SNAKE_CASE_ )['positions']
__snake_case = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 356 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
| 40 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
a_ = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( a__ , a__ ):
_UpperCAmelCase ="convnextv2"
def __init__( self: Any , a: List[Any]=3 , a: int=4 , a: Tuple=4 , a: Union[str, Any]=None , a: Union[str, Any]=None , a: str="gelu" , a: Tuple=0.02 , a: Dict=1e-12 , a: int=0.0 , a: int=2_24 , a: List[str]=None , a: Optional[int]=None , **a: Optional[int] , ) ->Tuple:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
a_ = num_channels
a_ = patch_size
a_ = num_stages
a_ = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
a_ = [3, 3, 9, 3] if depths is None else depths
a_ = hidden_act
a_ = initializer_range
a_ = layer_norm_eps
a_ = drop_path_rate
a_ = image_size
a_ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(self.depths) + 1)]
a_ = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names)
| 685 |
def UpperCamelCase ( snake_case__ : int ) -> str:
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
@dataclass
class A__ :
_UpperCAmelCase :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
_UpperCAmelCase :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
_UpperCAmelCase :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_UpperCAmelCase :bool = field(
default=a__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.task_name.lower()
class A__ ( a__ ):
_UpperCAmelCase :Any = "train"
_UpperCAmelCase :List[Any] = "dev"
_UpperCAmelCase :Any = "test"
class A__ ( a__ ):
_UpperCAmelCase :GlueDataTrainingArguments
_UpperCAmelCase :str
_UpperCAmelCase :List[InputFeatures]
def __init__( self , A_ , A_ , A_ = None , A_ = Split.train , A_ = None , ):
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Dict = args
UpperCamelCase : Any = glue_processors[args.task_name]()
UpperCamelCase : str = glue_output_modes[args.task_name]
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
try:
UpperCamelCase : Optional[int] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
UpperCamelCase : Dict = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
UpperCamelCase : int = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase : Optional[int] = label_list[2], label_list[1]
UpperCamelCase : Optional[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase : Optional[int] = cached_features_file + '.lock'
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not args.overwrite_cache:
UpperCamelCase : Tuple = time.time()
UpperCamelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
UpperCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
UpperCamelCase : List[str] = self.processor.get_test_examples(args.data_dir )
else:
UpperCamelCase : Optional[Any] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
UpperCamelCase : Optional[int] = examples[:limit_length]
UpperCamelCase : Optional[int] = glue_convert_examples_to_features(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , max_length=args.max_seq_length , label_list=SCREAMING_SNAKE_CASE_ , output_mode=self.output_mode , )
UpperCamelCase : List[Any] = time.time()
torch.save(self.features , SCREAMING_SNAKE_CASE_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self , A_ ):
'''simple docstring'''
return self.features[i]
def __UpperCamelCase( self ):
'''simple docstring'''
return self.label_list
| 629 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
UpperCamelCase : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Dict = new_module
UpperCamelCase : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCamelCase : Union[str, Any] = tensor_name in module._buffers
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
else:
UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[Any] = value.to('cpu' )
if value.dtype == torch.inta:
UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
UpperCamelCase : Union[str, Any] = new_value.T
UpperCamelCase : Union[str, Any] = old_value.__dict__
if is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
elif is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
UpperCamelCase : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[str] = value.to(snake_case__ )
else:
UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
UpperCamelCase : Optional[int] = new_value
else:
UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
UpperCamelCase : List[str] = new_value
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int:
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase : str = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape
else:
UpperCamelCase : Any = module.in_features
UpperCamelCase : List[str] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase : Any = bnb.nn.LinearabitLt(
snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
UpperCamelCase : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase : str = bnb.nn.Linearabit(
snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
UpperCamelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase : Any = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase : List[str] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] )
UpperCamelCase : Optional[int] = len(snake_case__ ) > 0
# Check if it is a base model
UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : List[Any] = list(model.named_children() )
UpperCamelCase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ )
UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
UpperCamelCase : Tuple = ['.weight', '.bias']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
| 40 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class lowerCAmelCase :
'''simple docstring'''
A = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
A = field(
default=a__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
A = field(
default=a__ , metadata={'help': 'The column name of the images in the files.'} )
A = field(default=a__ , metadata={'help': 'A folder containing the training data.'} )
A = field(default=a__ , metadata={'help': 'A folder containing the validation data.'} )
A = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
A = field(
default=a__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
A = field(
default=a__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowerCamelCase__ ( self :str ) -> int:
"""simple docstring"""
UpperCamelCase__ = {}
if self.train_dir is not None:
UpperCamelCase__ = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ = self.validation_dir
UpperCamelCase__ = data_files if data_files else None
@dataclass
class lowerCAmelCase :
'''simple docstring'''
A = field(
default=a__ , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
A = field(
default=a__ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
A = field(
default=a__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
A = field(
default=a__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
A = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
A = field(default=a__ , metadata={'help': 'Name or path of preprocessor config.'} )
A = field(
default=a__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
A = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
A = field(
default=a__ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class lowerCAmelCase ( a__ ):
'''simple docstring'''
A = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def snake_case__ ( _snake_case : int ):
"""simple docstring"""
UpperCamelCase__ = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ = ds['train'].train_test_split(data_args.train_val_split )
UpperCamelCase__ = split['train']
UpperCamelCase__ = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case__ )
elif model_args.model_name_or_path:
UpperCamelCase__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
UpperCamelCase__ = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case__ )
elif model_args.model_name_or_path:
UpperCamelCase__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
UpperCamelCase__ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
UpperCamelCase__ = ViTMAEForPreTraining(snake_case__ )
if training_args.do_train:
UpperCamelCase__ = ds['train'].column_names
else:
UpperCamelCase__ = ds['validation'].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ = 'image'
elif "img" in column_names:
UpperCamelCase__ = 'img'
else:
UpperCamelCase__ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ = image_processor.size['shortest_edge']
else:
UpperCamelCase__ = (image_processor.size['height'], image_processor.size['width'])
UpperCamelCase__ = Compose(
[
Lambda(lambda _snake_case : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(snake_case__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_snake_case : List[Any] ):
UpperCamelCase__ = [transforms(snake_case__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
UpperCamelCase__ = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case__ )
# Compute absolute learning rate
UpperCamelCase__ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase__ = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , snake_case__ )
trainer.save_metrics("eval" , snake_case__ )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
def snake_case__ ( _snake_case : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 516 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( snake_case__ : int ) -> Dict:
UpperCamelCase : Optional[Any] = tmp_path / 'file.csv'
UpperCamelCase : Optional[Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]:
UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv'
UpperCamelCase : Any = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str:
UpperCamelCase : Any = tmp_path / 'csv_with_image.csv'
UpperCamelCase : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv'
UpperCamelCase : Dict = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv'
UpperCamelCase : Union[str, Any] = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]:
UpperCamelCase : str = Csv()
UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case__ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : List[str] = f.read().splitlines()[1]
UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
UpperCamelCase : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( snake_case__ : Any ) -> str:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : Any = f.read().splitlines()[1:]
UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
UpperCamelCase : List[str] = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(snake_case__ ) for label in labels]
def UpperCamelCase ( snake_case__ : str ) -> List[Any]:
UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda snake_case__ : [int(snake_case__ ) for i in x.split()]} )
UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
UpperCamelCase : str = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 40 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
_SCREAMING_SNAKE_CASE = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
_SCREAMING_SNAKE_CASE = "▁"
class _lowerCAmelCase ( a__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BigBirdTokenizer
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = []
def __init__( self : List[Any] , __snake_case : str=None , __snake_case : Any=None , __snake_case : int="<unk>" , __snake_case : int="<s>" , __snake_case : Dict="</s>" , __snake_case : Dict="<pad>" , __snake_case : Dict="[SEP]" , __snake_case : Union[str, Any]="[MASK]" , __snake_case : int="[CLS]" , **__snake_case : Tuple , )-> Optional[Any]:
snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
snake_case = vocab_file
snake_case = False if not self.vocab_file else True
def lowerCAmelCase ( self : List[Any] , __snake_case : str , __snake_case : Union[str, Any] = None )-> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Dict , __snake_case : Optional[int] , __snake_case : List[str] = None , __snake_case : List[str] = False )-> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def lowerCAmelCase ( self : Tuple , __snake_case : int , __snake_case : Union[str, Any] = None )-> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Dict , __snake_case : List[str] = None )-> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 369 |
import math
import random
def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
__UpperCAmelCase = 0.02
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float:
UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCamelCase : int = (expected / 100) - layer_a
# Error delta
UpperCamelCase : List[str] = layer_1_error * sigmoid_function(snake_case__ , snake_case__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input('''Expected value: '''))
__UpperCAmelCase = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 40 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCAmelCase ( a__ ):
'''simple docstring'''
snake_case = "funnel"
snake_case = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __snake_case : Optional[int]=30522 , __snake_case : Dict=[4, 4, 4] , __snake_case : Union[str, Any]=None , __snake_case : int=2 , __snake_case : str=768 , __snake_case : Union[str, Any]=12 , __snake_case : int=64 , __snake_case : Tuple=3072 , __snake_case : Union[str, Any]="gelu_new" , __snake_case : Optional[int]=0.1 , __snake_case : str=0.1 , __snake_case : int=0.0 , __snake_case : List[Any]=0.1 , __snake_case : List[Any]=None , __snake_case : Tuple=1e-9 , __snake_case : str="mean" , __snake_case : Optional[int]="relative_shift" , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Any=True , **__snake_case : Optional[int] , ) -> Dict:
'''simple docstring'''
lowerCamelCase = vocab_size
lowerCamelCase = block_sizes
lowerCamelCase = [1] * len(SCREAMING_SNAKE_CASE_ ) if block_repeats is None else block_repeats
assert len(SCREAMING_SNAKE_CASE_ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
lowerCamelCase = num_decoder_layers
lowerCamelCase = d_model
lowerCamelCase = n_head
lowerCamelCase = d_head
lowerCamelCase = d_inner
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout
lowerCamelCase = attention_dropout
lowerCamelCase = activation_dropout
lowerCamelCase = initializer_range
lowerCamelCase = initializer_std
lowerCamelCase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
lowerCamelCase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
lowerCamelCase = attention_type
lowerCamelCase = separate_cls
lowerCamelCase = truncate_seq
lowerCamelCase = pool_q_only
super().__init__(**SCREAMING_SNAKE_CASE_ )
@property
def lowerCamelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Dict ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.block_sizes )
@num_blocks.setter
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
| 246 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[int]:
return EnvironmentCommand()
class lowerCAmelCase_ ( a__ ):
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : List[Any] = parser.add_parser('env' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = huggingface_hub.__version__
UpperCamelCase : int = 'not installed'
UpperCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
UpperCamelCase : Any = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : Dict = 'not installed'
if is_transformers_available():
import transformers
UpperCamelCase : str = transformers.__version__
UpperCamelCase : Optional[Any] = 'not installed'
if is_accelerate_available():
import accelerate
UpperCamelCase : Dict = accelerate.__version__
UpperCamelCase : List[str] = 'not installed'
if is_xformers_available():
import xformers
UpperCamelCase : List[str] = xformers.__version__
UpperCamelCase : Dict = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(SCREAMING_SNAKE_CASE_ ) )
return info
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 40 | 0 |
__SCREAMING_SNAKE_CASE : Optional[int] =[
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 428 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__UpperCAmelCase = {
'''facebook/xglm-564M''': 2_048,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase : Any = 7
UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase : Optional[int] = len(self.sp_model )
UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
UpperCamelCase : int = self.__dict__.copy()
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
UpperCamelCase : Any = {}
UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCamelCase : Optional[int] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case_ ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip()
return out_string
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi:
UpperCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 40 | 0 |
'''simple docstring'''
def UpperCamelCase_ ( A__ : float , A__ : list[float] ):
'''simple docstring'''
if discount_rate < 0:
raise ValueError("""Discount rate cannot be negative""" )
if not cash_flows:
raise ValueError("""Cash flows list cannot be empty""" )
lowerCAmelCase_ : Union[str, Any] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(snake_case__ ) )
return round(snake_case__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Dict = RobertaTokenizer
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) )
UpperCamelCase : List[str] = add_prefix_space
UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = add_prefix_space
UpperCamelCase : Optional[Any] = 'post_processor'
UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase : Optional[Any] = tuple(state['sep'] )
if "cls" in state:
UpperCamelCase : Optional[int] = tuple(state['cls'] )
UpperCamelCase : Any = False
if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Optional[int] = add_prefix_space
UpperCamelCase : List[Any] = True
if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCamelCase : Dict = trim_offsets
UpperCamelCase : Union[str, Any] = True
if changes_to_apply:
UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) )
UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value
UpperCamelCase : List[Any] = value
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple:
UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 40 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = args.pruning_method
SCREAMING_SNAKE_CASE = args.threshold
SCREAMING_SNAKE_CASE = args.model_name_or_path.rstrip('/' )
SCREAMING_SNAKE_CASE = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
SCREAMING_SNAKE_CASE = torch.load(os.path.join(snake_case__ , 'pytorch_model.bin' ) )
SCREAMING_SNAKE_CASE = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
SCREAMING_SNAKE_CASE = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
SCREAMING_SNAKE_CASE = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
SCREAMING_SNAKE_CASE = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
SCREAMING_SNAKE_CASE = MagnitudeBinarizer.apply(inputs=snake_case__ , threshold=snake_case__ )
SCREAMING_SNAKE_CASE = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[f"""{prefix_}mask_scores"""]
SCREAMING_SNAKE_CASE = TopKBinarizer.apply(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[f"""{prefix_}mask_scores"""]
SCREAMING_SNAKE_CASE = ThresholdBinarizer.apply(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[f"""{prefix_}mask_scores"""]
SCREAMING_SNAKE_CASE = -0.1, 1.1
SCREAMING_SNAKE_CASE = torch.sigmoid(snake_case__ )
SCREAMING_SNAKE_CASE = s * (r - l) + l
SCREAMING_SNAKE_CASE = s_bar.clamp(min=0.0 , max=1.0 )
SCREAMING_SNAKE_CASE = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
SCREAMING_SNAKE_CASE = os.path.join(
os.path.dirname(snake_case__ ) , f"""bertarized_{os.path.basename(snake_case__ )}""" )
if not os.path.isdir(snake_case__ ):
shutil.copytree(snake_case__ , snake_case__ )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(snake_case__ , os.path.join(snake_case__ , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
a_ : Dict = parser.parse_args()
main(args)
| 439 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCamelCase : str = {}
if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
UpperCamelCase : List[str] = {'dtype': torch.intaa}
elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
UpperCamelCase : int = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ):
UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
UpperCamelCase : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
| 40 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( a__ , unittest.TestCase ):
"""simple docstring"""
lowercase = KandinskyVaaPipeline
lowercase = [
"image_embeds",
"negative_image_embeds",
]
lowercase = ["image_embeds", "negative_image_embeds"]
lowercase = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase = False
@property
def lowerCamelCase ( self : Dict ):
return 32
@property
def lowerCamelCase ( self : Optional[Any] ):
return 32
@property
def lowerCamelCase ( self : Tuple ):
return self.time_input_dim
@property
def lowerCamelCase ( self : List[str] ):
return self.time_input_dim * 4
@property
def lowerCamelCase ( self : List[str] ):
return 100
@property
def lowerCamelCase ( self : Any ):
torch.manual_seed(0 )
snake_case__ : List[str] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : List[str] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def lowerCamelCase ( self : Optional[int] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase ( self : int ):
torch.manual_seed(0 )
snake_case__ : str = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase ( self : Tuple ):
snake_case__ : int = self.dummy_unet
snake_case__ : Union[str, Any] = self.dummy_movq
snake_case__ : Tuple = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=SCREAMING_SNAKE_CASE_ , )
snake_case__ : Tuple = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : Dict=0 ):
snake_case__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
snake_case__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
snake_case__ : int = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
snake_case__ : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
snake_case__ : Dict = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowerCamelCase ( self : int ):
snake_case__ : str = 'cpu'
snake_case__ : int = self.get_dummy_components()
snake_case__ : Optional[int] = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
snake_case__ : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
snake_case__ : Dict = output.images
snake_case__ : Tuple = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
snake_case__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : Dict = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
snake_case__ : Any = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
snake_case__ : Optional[Any] = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
snake_case__ : int = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case__ : str = 'red cat, 4k photo'
snake_case__ : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ : str = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case__ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ : Any = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=100 , output_type="""np""" , )
snake_case__ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 374 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
UpperCamelCase : int = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(snake_case__ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCamelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(snake_case__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCamelCase : Dict = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(snake_case__ )
UpperCamelCase : List[Any] = []
for value in value_array:
UpperCamelCase : Optional[Any] = euclidean(snake_case__ , dataset[0] )
UpperCamelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCamelCase : Union[str, Any] = euclidean(snake_case__ , snake_case__ )
if dist > temp_dist:
UpperCamelCase : str = temp_dist
UpperCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCAmelCase ( a__ ):
"""simple docstring"""
A__ : List[Any] = ["image_processor", "tokenizer"]
A__ : Optional[int] = "CLIPImageProcessor"
A__ : Any = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self : Tuple , _snake_case : Any=None , _snake_case : Tuple=None , **_snake_case : List[str] ):
"""simple docstring"""
A__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , SCREAMING_SNAKE_CASE_ , )
A__ = kwargs.pop('feature_extractor' )
A__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __call__( self : Optional[int] , _snake_case : List[Any]=None , _snake_case : Optional[int]=None , _snake_case : Dict=None , **_snake_case : Optional[Any] ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
A__ = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if images is not None:
A__ = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text is not None and images is not None:
A__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ )
def _a ( self : Union[str, Any] , *_snake_case : str , **_snake_case : Any ):
"""simple docstring"""
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _a ( self : Union[str, Any] , *_snake_case : Dict , **_snake_case : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.tokenizer.model_input_names
A__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 9 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__UpperCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__UpperCAmelCase = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__UpperCAmelCase = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__UpperCAmelCase = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__UpperCAmelCase = np.expand_dims(test_image, axis=0)
__UpperCAmelCase = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__UpperCAmelCase = '''Normal'''
if result[0][0] == 1:
__UpperCAmelCase = '''Abnormality detected'''
| 40 | 0 |
'''simple docstring'''
def lowerCamelCase__ ( a , a , a , a , a ):
if index == number_of_items:
return 0
__snake_case = 0
__snake_case = 0
__snake_case = knapsack(snake_case__ , snake_case__ , snake_case__ , snake_case__ , index + 1 )
if weights[index] <= max_weight:
__snake_case = values[index] + knapsack(
snake_case__ , snake_case__ , snake_case__ , max_weight - weights[index] , index + 1 )
return max(snake_case__ , snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = '''us-east-1''' # defaults region
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : str
UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
UpperCAmelCase__ : Union[str, Any] = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000}
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ) -> str:
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def UpperCamelCase ( snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 40 | 0 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE__ ( a__ ):
def __init__( self: List[Any] , a: str , a: int , a: str , a: Dict , ) ->List[str]:
'''simple docstring'''
super().__init__()
a_ = value_function
a_ = unet
a_ = scheduler
a_ = env
a_ = env.get_dataset()
a_ = {}
for key in self.data.keys():
try:
a_ = self.data[key].mean()
except: # noqa: E722
pass
a_ = {}
for key in self.data.keys():
try:
a_ = self.data[key].std()
except: # noqa: E722
pass
a_ = env.observation_space.shape[0]
a_ = env.action_space.shape[0]
def _lowerCAmelCase ( self: Any , a: str , a: List[str]) ->str:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def _lowerCAmelCase ( self: Dict , a: Union[str, Any] , a: str) ->int:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def _lowerCAmelCase ( self: Union[str, Any] , a: Dict) ->Dict:
'''simple docstring'''
if type(SCREAMING_SNAKE_CASE_) is dict:
return {k: self.to_torch(SCREAMING_SNAKE_CASE_) for k, v in x_in.items()}
elif torch.is_tensor(SCREAMING_SNAKE_CASE_):
return x_in.to(self.unet.device)
return torch.tensor(SCREAMING_SNAKE_CASE_ , device=self.unet.device)
def _lowerCAmelCase ( self: Optional[int] , a: Optional[int] , a: Tuple , a: int) ->Any:
'''simple docstring'''
for key, val in cond.items():
a_ = val.clone()
return x_in
def _lowerCAmelCase ( self: List[Any] , a: List[str] , a: Dict , a: Optional[int] , a: Any) ->Optional[int]:
'''simple docstring'''
a_ = x.shape[0]
a_ = None
for i in tqdm.tqdm(self.scheduler.timesteps):
# create batch of timesteps to pass into model
a_ = torch.full((batch_size,) , SCREAMING_SNAKE_CASE_ , device=self.unet.device , dtype=torch.long)
for _ in range(SCREAMING_SNAKE_CASE_):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
a_ = self.value_function(x.permute(0 , 2 , 1) , SCREAMING_SNAKE_CASE_).sample
a_ = torch.autograd.grad([y.sum()] , [x])[0]
a_ = self.scheduler._get_variance(SCREAMING_SNAKE_CASE_)
a_ = torch.exp(0.5 * posterior_variance)
a_ = model_std * grad
a_ = 0
a_ = x.detach()
a_ = x + scale * grad
a_ = self.reset_xa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.action_dim)
a_ = self.unet(x.permute(0 , 2 , 1) , SCREAMING_SNAKE_CASE_).sample.permute(0 , 2 , 1)
# TODO: verify deprecation of this kwarg
a_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , predict_epsilon=SCREAMING_SNAKE_CASE_)['prev_sample']
# apply conditions to the trajectory (set the initial state)
a_ = self.reset_xa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.action_dim)
a_ = self.to_torch(SCREAMING_SNAKE_CASE_)
return x, y
def __call__( self: Dict , a: List[str] , a: str=64 , a: Tuple=32 , a: str=2 , a: List[Any]=0.1) ->Dict:
'''simple docstring'''
a_ = self.normalize(SCREAMING_SNAKE_CASE_ , "observations")
a_ = obs[None].repeat(SCREAMING_SNAKE_CASE_ , axis=0)
a_ = {0: self.to_torch(SCREAMING_SNAKE_CASE_)}
a_ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
a_ = randn_tensor(SCREAMING_SNAKE_CASE_ , device=self.unet.device)
a_ = self.reset_xa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.action_dim)
a_ = self.to_torch(SCREAMING_SNAKE_CASE_)
# run the diffusion process
a_ = self.run_diffusion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# sort output trajectories by value
a_ = y.argsort(0 , descending=SCREAMING_SNAKE_CASE_).squeeze()
a_ = x[sorted_idx]
a_ = sorted_values[:, :, : self.action_dim]
a_ = actions.detach().cpu().numpy()
a_ = self.de_normalize(SCREAMING_SNAKE_CASE_ , key="actions")
# select the action with the highest value
if y is not None:
a_ = 0
else:
# if we didn't run value guiding, select a random action
a_ = np.random.randint(0 , SCREAMING_SNAKE_CASE_)
a_ = denorm_actions[selected_index, 0]
return denorm_actions
| 685 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__UpperCAmelCase = '''src/transformers'''
__UpperCAmelCase = '''docs/source/en/tasks'''
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
UpperCamelCase : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
UpperCamelCase : Optional[Any] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
__UpperCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__UpperCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide]
UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
UpperCamelCase : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__lowerCamelCase : str = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__lowerCamelCase : Union[str, Any] = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__lowerCamelCase : List[Any] = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def __UpperCamelCase( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def __UpperCamelCase( self , A_ , A_ , A_=4 , A_=False ):
'''simple docstring'''
UpperCamelCase : int = compute_bleu(
reference_corpus=SCREAMING_SNAKE_CASE_ , translation_corpus=SCREAMING_SNAKE_CASE_ , max_order=SCREAMING_SNAKE_CASE_ , smooth=SCREAMING_SNAKE_CASE_ )
(UpperCamelCase) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 629 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : int = IFPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ) -> str:
return self._get_dummy_components()
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def snake_case_ ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case_ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_local()
def snake_case_ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def snake_case_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
# if
UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa )
UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase : int = None
UpperCamelCase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCamelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 40 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : str = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 516 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase ( snake_case__ : Tuple="" ) -> str:
UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) )
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' )
sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 )
UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) )
UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = 'Hey!'
UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class _lowerCAmelCase ( a__ ):
"""simple docstring"""
snake_case_ = "bloom"
snake_case_ = ["past_key_values"]
snake_case_ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : List[str] , __snake_case : Optional[Any]=25_08_80 , __snake_case : Optional[Any]=64 , __snake_case : Optional[int]=2 , __snake_case : Optional[Any]=8 , __snake_case : str=1e-5 , __snake_case : List[str]=0.02 , __snake_case : Tuple=True , __snake_case : List[str]=1 , __snake_case : Optional[Any]=2 , __snake_case : Optional[int]=False , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Union[str, Any]=1 , __snake_case : int=False , **__snake_case : str , )-> Dict:
snake_case = vocab_size
# Backward compatibility with n_embed kwarg
snake_case = kwargs.pop("""n_embed""" , SCREAMING_SNAKE_CASE_ )
snake_case = hidden_size if n_embed is None else n_embed
snake_case = n_layer
snake_case = n_head
snake_case = layer_norm_epsilon
snake_case = initializer_range
snake_case = use_cache
snake_case = pretraining_tp
snake_case = apply_residual_connection_post_layernorm
snake_case = hidden_dropout
snake_case = attention_dropout
snake_case = bos_token_id
snake_case = eos_token_id
snake_case = slow_but_exact
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class _lowerCAmelCase ( a__ ):
"""simple docstring"""
snake_case_ = version.parse("1.12" )
def __init__( self : Any , __snake_case : List[Any] , __snake_case : List[str] = "default" , __snake_case : Tuple = None , __snake_case : str = False , )-> Dict:
super().__init__(SCREAMING_SNAKE_CASE_ , task=SCREAMING_SNAKE_CASE_ , patching_specs=SCREAMING_SNAKE_CASE_ , use_past=SCREAMING_SNAKE_CASE_ )
if not getattr(self._config , """pad_token_id""" , SCREAMING_SNAKE_CASE_ ):
# TODO: how to do that better?
snake_case = 0
@property
def lowerCAmelCase ( self : Union[str, Any] )-> Mapping[str, Mapping[int, str]]:
snake_case = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction="""inputs""" , inverted_values_shape=SCREAMING_SNAKE_CASE_ )
snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowerCAmelCase ( self : List[str] )-> int:
return self._config.n_layer
@property
def lowerCAmelCase ( self : Dict )-> int:
return self._config.n_head
@property
def lowerCAmelCase ( self : Any )-> float:
return 1e-3
def lowerCAmelCase ( self : Dict , __snake_case : str , __snake_case : List[str] = -1 , __snake_case : List[Any] = -1 , __snake_case : Dict = False , __snake_case : List[str] = None , )-> Mapping[str, Any]:
snake_case = super(SCREAMING_SNAKE_CASE_ , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
# We need to order the input in the way they appears in the forward()
snake_case = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
snake_case = seqlen + 2
snake_case = self._config.hidden_size // self.num_attention_heads
snake_case = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
snake_case = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
snake_case = [
(torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(self.num_layers )
]
snake_case = common_inputs['attention_mask']
if self.use_past:
snake_case = ordered_inputs['attention_mask'].dtype
snake_case = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase ( self : int )-> int:
return 13
| 369 |
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : int = [1]
for i in range(2 , snake_case__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCamelCase : List[Any] = []
UpperCamelCase : List[Any] = list(range(snake_case__ ) )
# Find permutation
while factorials:
UpperCamelCase : int = factorials.pop()
UpperCamelCase , UpperCamelCase : int = divmod(snake_case__ , snake_case__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowerCAmelCase ( a__ ):
'''simple docstring'''
snake_case = "decision_transformer"
snake_case = ["past_key_values"]
snake_case = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , __snake_case : int=17 , __snake_case : Any=4 , __snake_case : List[Any]=128 , __snake_case : Optional[Any]=4096 , __snake_case : str=True , __snake_case : List[Any]=1 , __snake_case : Union[str, Any]=1024 , __snake_case : Dict=3 , __snake_case : int=1 , __snake_case : List[str]=None , __snake_case : Dict="relu" , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Dict=0.1 , __snake_case : Optional[Any]=1e-5 , __snake_case : Union[str, Any]=0.02 , __snake_case : Optional[Any]=True , __snake_case : Any=True , __snake_case : List[Any]=50256 , __snake_case : List[str]=50256 , __snake_case : Dict=False , __snake_case : str=False , **__snake_case : str , ) -> str:
'''simple docstring'''
lowerCamelCase = state_dim
lowerCamelCase = act_dim
lowerCamelCase = hidden_size
lowerCamelCase = max_ep_len
lowerCamelCase = action_tanh
lowerCamelCase = vocab_size
lowerCamelCase = n_positions
lowerCamelCase = n_layer
lowerCamelCase = n_head
lowerCamelCase = n_inner
lowerCamelCase = activation_function
lowerCamelCase = resid_pdrop
lowerCamelCase = embd_pdrop
lowerCamelCase = attn_pdrop
lowerCamelCase = layer_norm_epsilon
lowerCamelCase = initializer_range
lowerCamelCase = scale_attn_weights
lowerCamelCase = use_cache
lowerCamelCase = scale_attn_by_inverse_layer_idx
lowerCamelCase = reorder_and_upcast_attn
lowerCamelCase = bos_token_id
lowerCamelCase = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 246 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( a__ ):
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) )
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any:
UpperCamelCase : int = parent
UpperCamelCase : int = batch_size
UpperCamelCase : List[Any] = image_size
UpperCamelCase : List[str] = patch_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 )
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : Optional[int] = conv_kernel_size
UpperCamelCase : List[str] = output_stride
UpperCamelCase : Union[str, Any] = classifier_dropout_prob
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Any = is_training
UpperCamelCase : int = num_labels
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Tuple = scope
UpperCamelCase : List[str] = width_multiplier
UpperCamelCase : Any = ffn_dropout
UpperCamelCase : List[Any] = attn_dropout
def snake_case_ ( self ) -> int:
UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[str] = None
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ) -> int:
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Optional[int] = self.num_labels
UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = MobileViTVaModelTester(self )
UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case_ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Tuple = outputs.hidden_states
UpperCamelCase : Dict = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase : Any = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Tuple:
UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> str:
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
], device=SCREAMING_SNAKE_CASE_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = outputs.logits.detach().cpu()
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] )
UpperCamelCase : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A_ ( a__ ):
_A :jnp.ndarray
_A :jnp.ndarray
class A_ ( nn.Module ):
_A :int
_A :Tuple[int] = (16, 32, 96, 256)
_A :jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowercase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase = self.block_out_channels[i]
lowercase = self.block_out_channels[i + 1]
lowercase = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(SCREAMING_SNAKE_CASE_ )
lowercase = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(SCREAMING_SNAKE_CASE_ )
lowercase = blocks
lowercase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Optional[int] , snake_case__ : List[Any] ):
lowercase = self.conv_in(SCREAMING_SNAKE_CASE_ )
lowercase = nn.silu(SCREAMING_SNAKE_CASE_ )
for block in self.blocks:
lowercase = block(SCREAMING_SNAKE_CASE_ )
lowercase = nn.silu(SCREAMING_SNAKE_CASE_ )
lowercase = self.conv_out(SCREAMING_SNAKE_CASE_ )
return embedding
@flax_register_to_config
class A_ ( nn.Module , a__ , a__ ):
_A :int = 32
_A :int = 4
_A :Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_A :Union[bool, Tuple[bool]] = False
_A :Tuple[int] = (320, 640, 1280, 1280)
_A :int = 2
_A :Union[int, Tuple[int]] = 8
_A :Optional[Union[int, Tuple[int]]] = None
_A :int = 1280
_A :float = 0.0
_A :bool = False
_A :jnp.dtype = jnp.floataa
_A :bool = True
_A :int = 0
_A :str = "rgb"
_A :Tuple[int] = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : Any ):
# init input tensors
lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase = jnp.zeros(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa )
lowercase = jnp.ones((1,) , dtype=jnp.intaa )
lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase = jnp.zeros(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa )
lowercase = jax.random.split(SCREAMING_SNAKE_CASE_ )
lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )["params"]
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowercase = self.block_out_channels
lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase = self.num_attention_heads or self.attention_head_dim
# input
lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase = FlaxTimestepEmbedding(SCREAMING_SNAKE_CASE_ , dtype=self.dtype )
lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase = self.only_cross_attention
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase = []
lowercase = []
lowercase = block_out_channels[0]
lowercase = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase = output_channel
lowercase = block_out_channels[i]
lowercase = i == len(SCREAMING_SNAKE_CASE_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase = FlaxCrossAttnDownBlockaD(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase = FlaxDownBlockaD(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(SCREAMING_SNAKE_CASE_ )
for _ in range(self.layers_per_block ):
lowercase = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ )
if not is_final_block:
lowercase = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ )
lowercase = down_blocks
lowercase = controlnet_down_blocks
# mid
lowercase = block_out_channels[-1]
lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=SCREAMING_SNAKE_CASE_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[Any] = 1.0 , snake_case__ : Any = True , snake_case__ : Tuple = False , ):
lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase = jnp.flip(SCREAMING_SNAKE_CASE_ , axis=1 )
# 1. time
if not isinstance(SCREAMING_SNAKE_CASE_ , jnp.ndarray ):
lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(SCREAMING_SNAKE_CASE_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase = timesteps.astype(dtype=jnp.floataa )
lowercase = jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 0 )
lowercase = self.time_proj(SCREAMING_SNAKE_CASE_ )
lowercase = self.time_embedding(SCREAMING_SNAKE_CASE_ )
# 2. pre-process
lowercase = jnp.transpose(SCREAMING_SNAKE_CASE_ , (0, 2, 3, 1) )
lowercase = self.conv_in(SCREAMING_SNAKE_CASE_ )
lowercase = jnp.transpose(SCREAMING_SNAKE_CASE_ , (0, 2, 3, 1) )
lowercase = self.controlnet_cond_embedding(SCREAMING_SNAKE_CASE_ )
sample += controlnet_cond
# 3. down
lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase = down_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=not train )
else:
lowercase = down_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=not train )
# 5. contronet blocks
lowercase = ()
for down_block_res_sample, controlnet_block in zip(SCREAMING_SNAKE_CASE_ , self.controlnet_down_blocks ):
lowercase = controlnet_block(SCREAMING_SNAKE_CASE_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase = controlnet_down_block_res_samples
lowercase = self.controlnet_mid_block(SCREAMING_SNAKE_CASE_ )
# 6. scaling
lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=SCREAMING_SNAKE_CASE_ , mid_block_res_sample=SCREAMING_SNAKE_CASE_ )
| 428 |
def UpperCamelCase ( snake_case__ : Optional[int] ) -> str:
UpperCamelCase : List[str] = [0] * len(snake_case__ )
UpperCamelCase : int = []
UpperCamelCase : Optional[int] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCamelCase : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 40 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__A : Optional[Any] = ["bert-base-uncased", "bert-base-cased"]
__A : str = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class __snake_case ( tf.keras.Model):
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase : str ) -> List[str]:
super().__init__()
lowerCAmelCase_ : Optional[int] = tokenizer
lowerCAmelCase_ : List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = TFAutoModel.from_config(SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : List[str] , lowerCamelCase : Union[str, Any] ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = self.bert(**SCREAMING_SNAKE_CASE_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Tuple ) -> Dict:
super().setUp()
lowerCAmelCase_ : Optional[Any] = [
BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
lowerCAmelCase_ : Optional[Any] = [TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , use_fast_bert_tokenizer=SCREAMING_SNAKE_CASE_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCAmelCase_ : str = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
lowerCAmelCase_ : Optional[int] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def __lowercase ( self : List[Any] ) -> Optional[Any]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCAmelCase_ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""tf""" , padding="""longest""" )
lowerCAmelCase_ : int = tf_tokenizer(SCREAMING_SNAKE_CASE_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def __lowercase ( self : Tuple ) -> int:
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : Tuple = tf_tokenizer(self.paired_sentences )
lowerCAmelCase_ : Union[str, Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def __lowercase ( self : Dict ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : Optional[Any] = tf.function(SCREAMING_SNAKE_CASE_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCAmelCase_ : Dict = tf.constant(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = compiled_tokenizer(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = tf_tokenizer(SCREAMING_SNAKE_CASE_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def __lowercase ( self : List[str] ) -> Tuple:
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : Any = ModelToSave(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = tf.convert_to_tensor(self.test_sentences )
lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCAmelCase_ : Dict = Path(SCREAMING_SNAKE_CASE_ ) / 'saved.model'
model.save(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = tf.keras.models.load_model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = loaded_model(SCREAMING_SNAKE_CASE_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 275 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
a_ : Any = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
a_ : str = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
a_ : Dict = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
a_ : List[str] = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
a_ : Optional[int] = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
a_ : Dict = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
a_ : List[Any] = tf.keras.preprocessing.image.img_to_array(test_image)
a_ : List[Any] = np.expand_dims(test_image, axis=0)
a_ : Union[str, Any] = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
a_ : Optional[int] = "Normal"
if result[0][0] == 1:
a_ : Union[str, Any] = "Abnormality detected"
| 439 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class UpperCAmelCase_ ( a__ ):
"""simple docstring"""
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , a__ , )
| 374 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
from __future__ import annotations
import math
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
A__ = size
# approximate the overall size of segment tree with given value
A__ = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
A__ = [0 for i in range(0 , 4 * size )]
A__ = [0 for i in range(0 , 4 * size )] # flag for lazy update
def _a ( self : Any , _snake_case : str ):
"""simple docstring"""
return idx * 2
def _a ( self : int , _snake_case : int ):
"""simple docstring"""
return idx * 2 + 1
def _a ( self : Tuple , _snake_case : List[str] , _snake_case : Any , _snake_case : List[str] , _snake_case : Optional[int] ):
"""simple docstring"""
if left_element == right_element:
A__ = a[left_element - 1]
else:
A__ = (left_element + right_element) // 2
self.build(self.left(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.build(self.right(SCREAMING_SNAKE_CASE_ ) , mid + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = max(
self.segment_tree[self.left(SCREAMING_SNAKE_CASE_ )] , self.segment_tree[self.right(SCREAMING_SNAKE_CASE_ )] )
def _a ( self : Tuple , _snake_case : List[str] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Tuple ):
"""simple docstring"""
if self.flag[idx] is True:
A__ = self.lazy[idx]
A__ = False
if left_element != right_element:
A__ = self.lazy[idx]
A__ = self.lazy[idx]
A__ = True
A__ = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
A__ = val
if left_element != right_element:
A__ = val
A__ = val
A__ = True
A__ = True
return True
A__ = (left_element + right_element) // 2
self.update(self.left(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.update(self.right(SCREAMING_SNAKE_CASE_ ) , mid + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = max(
self.segment_tree[self.left(SCREAMING_SNAKE_CASE_ )] , self.segment_tree[self.right(SCREAMING_SNAKE_CASE_ )] )
return True
def _a ( self : List[str] , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
if self.flag[idx] is True:
A__ = self.lazy[idx]
A__ = False
if left_element != right_element:
A__ = self.lazy[idx]
A__ = self.lazy[idx]
A__ = True
A__ = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
A__ = (left_element + right_element) // 2
A__ = self.query(self.left(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = self.query(self.right(SCREAMING_SNAKE_CASE_ ) , mid + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __str__( self : str ):
"""simple docstring"""
return str([self.query(1 , 1 , self.size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
SCREAMING_SNAKE_CASE__ = 1_5
SCREAMING_SNAKE_CASE__ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
| 9 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ViTFeatureExtractor''']
__UpperCAmelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_lowercase = """<<<<<<< This should probably be modified because it mentions: """
_lowercase = """=======
>>>>>>>
"""
_lowercase = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
_lowercase = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value(\'\1\')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value(\'string\')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value(\'string\'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def lowerCamelCase__ ( a ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class a_ ( a__ ):
@staticmethod
def lowercase__ ( __lowerCAmelCase : Union[str, Any] ):
__snake_case = parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , *__lowerCAmelCase : Union[str, Any] ):
__snake_case = get_logger('datasets-cli/converting' )
__snake_case = tfds_path
__snake_case = datasets_directory
def lowercase__ ( self : int ):
if os.path.isdir(self._tfds_path ):
__snake_case = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__snake_case = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__snake_case = os.path.abspath(self._datasets_directory )
self._logger.info(F'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__snake_case = []
__snake_case = []
__snake_case = {}
if os.path.isdir(self._tfds_path ):
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'Looking at file {f_name}' )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not os.path.isfile(SCREAMING_SNAKE_CASE_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as f:
__snake_case = f.readlines()
__snake_case = []
__snake_case = False
__snake_case = False
__snake_case = []
for line in lines:
__snake_case = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__snake_case = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__snake_case = ''
continue
elif "from absl import logging" in out_line:
__snake_case = 'from datasets import logging\n'
elif "getLogger" in out_line:
__snake_case = out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__snake_case = True
__snake_case = list(filter(lambda __lowerCAmelCase : e in out_line , SCREAMING_SNAKE_CASE_ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(SCREAMING_SNAKE_CASE_ ) + '\n' )
out_lines.append(SCREAMING_SNAKE_CASE_ )
out_lines.append(SCREAMING_SNAKE_CASE_ )
continue
else:
for pattern, replacement in TO_CONVERT:
__snake_case = re.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__snake_case = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , SCREAMING_SNAKE_CASE_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__snake_case = 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__snake_case = True
out_lines.append(SCREAMING_SNAKE_CASE_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__snake_case = f_name.replace('.py' , '' )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
self._logger.info(F'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(SCREAMING_SNAKE_CASE_ )
if needs_manual_update:
with_manual_update.append(SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
self._logger.info(F'Converted in {output_file}' )
for utils_file in utils_files:
try:
__snake_case = os.path.basename(SCREAMING_SNAKE_CASE_ )
__snake_case = imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(F'Moving {dest_folder} to {utils_file}' )
shutil.copy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except KeyError:
self._logger.error(F'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 356 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
| 40 | 0 |
'''simple docstring'''
import qiskit
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> qiskit.result.counts.Counts:
'''simple docstring'''
a_ = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
a_ = qiskit.QuantumCircuit(snake_case__ ,snake_case__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] ,[0] )
# Execute the circuit on the simulator
a_ = qiskit.execute(snake_case__ ,snake_case__ ,shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(snake_case__ )
if __name__ == "__main__":
print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
| 685 |
def UpperCamelCase ( snake_case__ : int ) -> str:
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
from __future__ import annotations
__lowerCamelCase : Optional[int] = 10
def A_ ( _lowerCAmelCase ) -> list[int]:
UpperCamelCase : Tuple = 1
UpperCamelCase : int = max(snake_case__ )
while placement <= max_digit:
# declare and initialize empty buckets
UpperCamelCase : list[list] = [[] for _ in range(snake_case__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
UpperCamelCase : int = int((i / placement) % RADIX )
buckets[tmp].append(snake_case__ )
# put each buckets' contents into list_of_ints
UpperCamelCase : List[str] = 0
for b in range(snake_case__ ):
for i in buckets[b]:
UpperCamelCase : Dict = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 629 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
UpperCamelCase : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Dict = new_module
UpperCamelCase : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCamelCase : Union[str, Any] = tensor_name in module._buffers
UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
else:
UpperCamelCase : Union[str, Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
UpperCamelCase : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[Any] = value.to('cpu' )
if value.dtype == torch.inta:
UpperCamelCase : Tuple = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase : Union[str, Any] = torch.tensor(snake_case__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
UpperCamelCase : Union[str, Any] = new_value.T
UpperCamelCase : Union[str, Any] = old_value.__dict__
if is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
elif is_abit:
UpperCamelCase : Optional[Any] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
UpperCamelCase : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
UpperCamelCase : Union[str, Any] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
UpperCamelCase : List[str] = value.to(snake_case__ )
else:
UpperCamelCase : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
UpperCamelCase : Optional[int] = new_value
else:
UpperCamelCase : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
UpperCamelCase : List[str] = new_value
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=False ) -> int:
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase : str = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase , UpperCamelCase : Tuple = module.weight.shape
else:
UpperCamelCase : Any = module.in_features
UpperCamelCase : List[str] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase : Any = bnb.nn.LinearabitLt(
snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
UpperCamelCase : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase : str = bnb.nn.Linearabit(
snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
UpperCamelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase : Any = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase : Optional[int] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase ( *snake_case__ : Tuple , **snake_case__ : List[str] ) -> List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def UpperCamelCase ( *snake_case__ : Dict , **snake_case__ : str ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
UpperCamelCase : int = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase : List[str] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Union[str, Any] = sum(snake_case__ , [] )
UpperCamelCase : Optional[int] = len(snake_case__ ) > 0
# Check if it is a base model
UpperCamelCase : str = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : List[Any] = list(model.named_children() )
UpperCamelCase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : Union[str, Any] = set(snake_case__ ) - set(snake_case__ )
UpperCamelCase : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
UpperCamelCase : Tuple = ['.weight', '.bias']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[int] = name.replace(snake_case__ , '' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
| 40 | 0 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ ( self :Union[str, Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str , lowerCamelCase_ :List[Any] ) -> int:
"""simple docstring"""
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delta=SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self :Tuple ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def lowerCamelCase__ ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = None
ops.enable_eager_execution_internal()
UpperCamelCase__ = tf.config.list_physical_devices("CPU" )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
UpperCamelCase__ = tf.config.list_logical_devices(device_type="CPU" )
UpperCamelCase__ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
UpperCamelCase__ = GradientAccumulator()
UpperCamelCase__ = tf.Variable([4.0, 3.0] )
UpperCamelCase__ = create_optimizer(5e-5 , 1_0 , 5 )
UpperCamelCase__ = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE_ )
def accumulate_on_replica(lowerCamelCase_ :Dict ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(lowerCamelCase_ :int , lowerCamelCase_ :Any ):
with strategy.scope():
UpperCamelCase__ = strategy.experimental_local_results(SCREAMING_SNAKE_CASE_ )
local_variables[0].assign(SCREAMING_SNAKE_CASE_ )
local_variables[1].assign(SCREAMING_SNAKE_CASE_ )
strategy.run(SCREAMING_SNAKE_CASE_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(SCREAMING_SNAKE_CASE_ )
def _check_local_values(lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] ):
UpperCamelCase__ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE_ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE_ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 516 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( snake_case__ : int ) -> Dict:
UpperCamelCase : Optional[Any] = tmp_path / 'file.csv'
UpperCamelCase : Optional[Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> List[str]:
UpperCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv'
UpperCamelCase : Any = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str:
UpperCamelCase : Any = tmp_path / 'csv_with_image.csv'
UpperCamelCase : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
UpperCamelCase : List[str] = tmp_path / 'csv_with_label.csv'
UpperCamelCase : Dict = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
UpperCamelCase : List[str] = tmp_path / 'csv_with_int_list.csv'
UpperCamelCase : Union[str, Any] = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(snake_case__ , 'w' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] ) -> List[Any]:
UpperCamelCase : str = Csv()
UpperCamelCase : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case__ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( snake_case__ : Union[str, Any] ) -> Optional[int]:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : List[str] = f.read().splitlines()[1]
UpperCamelCase : int = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
UpperCamelCase : Any = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
UpperCamelCase : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( snake_case__ : Any ) -> str:
with open(snake_case__ , encoding='utf-8' ) as f:
UpperCamelCase : Any = f.read().splitlines()[1:]
UpperCamelCase : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
UpperCamelCase : int = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
UpperCamelCase : List[str] = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(snake_case__ ) for label in labels]
def UpperCamelCase ( snake_case__ : str ) -> List[Any]:
UpperCamelCase : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda snake_case__ : [int(snake_case__ ) for i in x.split()]} )
UpperCamelCase : List[str] = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
UpperCamelCase : str = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 40 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowerCamelCase ( __lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ) -> int:
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""" , snake_case__ , )
if isinstance(snake_case__ , torch.Tensor ):
return image
elif isinstance(snake_case__ , PIL.Image.Image ):
snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
snake_case = image[0].size
snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
snake_case = np.concatenate(snake_case__ , axis=0 )
snake_case = np.array(snake_case__ ).astype(np.floataa ) / 255.0
snake_case = image.transpose(0 , 3 , 1 , 2 )
snake_case = 2.0 * image - 1.0
snake_case = torch.from_numpy(snake_case__ )
elif isinstance(image[0] , torch.Tensor ):
snake_case = torch.cat(snake_case__ , dim=0 )
return image
def __lowerCamelCase ( __lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ) -> Optional[int]:
if isinstance(snake_case__ , torch.Tensor ):
return mask
elif isinstance(snake_case__ , PIL.Image.Image ):
snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
snake_case = mask[0].size
snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
snake_case = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
snake_case = np.concatenate(snake_case__ , axis=0 )
snake_case = mask.astype(np.floataa ) / 255.0
snake_case = 0
snake_case = 1
snake_case = torch.from_numpy(snake_case__ )
elif isinstance(mask[0] , torch.Tensor ):
snake_case = torch.cat(snake_case__ , dim=0 )
return mask
class _lowerCAmelCase ( a__ ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
def __init__( self : str , __snake_case : Any , __snake_case : str )-> Optional[Any]:
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self : Any , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : List[str] = 2_50 , __snake_case : str = 0.0 , __snake_case : Union[str, Any] = 10 , __snake_case : Optional[Any] = 10 , __snake_case : int = None , __snake_case : List[str] = "pil" , __snake_case : Optional[Any] = True , )-> Union[ImagePipelineOutput, Tuple]:
snake_case = image
snake_case = _preprocess_image(SCREAMING_SNAKE_CASE_ )
snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
snake_case = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
snake_case = original_image.shape
snake_case = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
snake_case = eta
snake_case = self.scheduler.timesteps[0] + 1
snake_case = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
snake_case = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# compute previous image: x_t -> x_t-1
snake_case = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
snake_case = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
snake_case = t
snake_case = (image / 2 + 0.5).clamp(0 , 1 )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 369 |
import math
import random
def UpperCamelCase ( snake_case__ : float , snake_case__ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
__UpperCAmelCase = 0.02
def UpperCamelCase ( snake_case__ : int , snake_case__ : int ) -> float:
UpperCamelCase : Optional[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
UpperCamelCase : str = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCamelCase : int = (expected / 100) - layer_a
# Error delta
UpperCamelCase : List[str] = layer_1_error * sigmoid_function(snake_case__ , snake_case__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input('''Expected value: '''))
__UpperCAmelCase = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 40 | 0 |
from itertools import count
def a_ ( UpperCamelCase_ : int = 5_0 ) -> int:
"""simple docstring"""
lowerCamelCase = [1] * min_block_length
for n in count(snake_case__ ):
fill_count_functions.append(1 )
for block_length in range(snake_case__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 246 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[int]:
return EnvironmentCommand()
class lowerCAmelCase_ ( a__ ):
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : List[Any] = parser.add_parser('env' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = huggingface_hub.__version__
UpperCamelCase : int = 'not installed'
UpperCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
UpperCamelCase : Any = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : Dict = 'not installed'
if is_transformers_available():
import transformers
UpperCamelCase : str = transformers.__version__
UpperCamelCase : Optional[Any] = 'not installed'
if is_accelerate_available():
import accelerate
UpperCamelCase : Dict = accelerate.__version__
UpperCamelCase : List[str] = 'not installed'
if is_xformers_available():
import xformers
UpperCamelCase : List[str] = xformers.__version__
UpperCamelCase : Dict = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(SCREAMING_SNAKE_CASE_ ) )
return info
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 40 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] ={
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class A_ ( a__ ):
_A :List[Any] = "biogpt"
def __init__( self : List[Any] , snake_case__ : Dict=4_23_84 , snake_case__ : int=10_24 , snake_case__ : Any=24 , snake_case__ : Optional[int]=16 , snake_case__ : int=40_96 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : Dict=10_24 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : str=1E-12 , snake_case__ : Dict=True , snake_case__ : Optional[Any]=True , snake_case__ : str=0.0 , snake_case__ : List[Any]=0.0 , snake_case__ : Tuple=1 , snake_case__ : List[str]=0 , snake_case__ : List[Any]=2 , **snake_case__ : Optional[Any] , ):
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = scale_embedding
lowercase = use_cache
lowercase = layerdrop
lowercase = activation_dropout
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 428 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__UpperCAmelCase = {
'''facebook/xglm-564M''': 2_048,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase : Any = 7
UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase : Optional[int] = len(self.sp_model )
UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
UpperCamelCase : int = self.__dict__.copy()
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
UpperCamelCase : Any = {}
UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCamelCase : Optional[int] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case_ ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip()
return out_string
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi:
UpperCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 40 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def UpperCamelCase_ ( A__ : list[float] ):
'''simple docstring'''
return np.maximum(0 , snake_case__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 275 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Dict = RobertaTokenizer
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('type' ) )
UpperCamelCase : List[str] = add_prefix_space
UpperCamelCase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = add_prefix_space
UpperCamelCase : Optional[Any] = 'post_processor'
UpperCamelCase : Dict = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase : Optional[Any] = tuple(state['sep'] )
if "cls" in state:
UpperCamelCase : Optional[int] = tuple(state['cls'] )
UpperCamelCase : Any = False
if state.get('add_prefix_space', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase : Optional[int] = add_prefix_space
UpperCamelCase : List[Any] = True
if state.get('trim_offsets', SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCamelCase : Dict = trim_offsets
UpperCamelCase : Union[str, Any] = True
if changes_to_apply:
UpperCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE_, state.pop('type' ) )
UpperCamelCase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else value
UpperCamelCase : List[Any] = value
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Optional[int] = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCamelCase : Dict = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Tuple:
UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 40 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> float:
'''simple docstring'''
SCREAMING_SNAKE_CASE = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def __lowerCAmelCase ( ) -> Any:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 439 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCamelCase : str = {}
if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
UpperCamelCase : List[str] = {'dtype': torch.intaa}
elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
UpperCamelCase : int = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ):
UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
UpperCamelCase : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
| 40 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ ( a__ ):
"""simple docstring"""
lowercase = "bert"
def __init__( self : Tuple , snake_case_ : Optional[int]=30_522 , snake_case_ : Tuple=768 , snake_case_ : Optional[Any]=12 , snake_case_ : List[str]=12 , snake_case_ : str=3_072 , snake_case_ : List[str]="gelu" , snake_case_ : Tuple=0.1 , snake_case_ : List[Any]=0.1 , snake_case_ : Optional[int]=512 , snake_case_ : Optional[Any]=2 , snake_case_ : List[Any]=0.02 , snake_case_ : List[str]=1E-1_2 , snake_case_ : int=0 , snake_case_ : Tuple="absolute" , snake_case_ : Union[str, Any]=True , snake_case_ : Optional[Any]=None , **snake_case_ : Optional[int] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
snake_case__ : List[str] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : str = hidden_act
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : Optional[Any] = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : int = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : List[str] = use_cache
snake_case__ : Union[str, Any] = classifier_dropout
class UpperCAmelCase_ ( a__ ):
"""simple docstring"""
@property
def lowerCamelCase ( self : Dict ):
if self.task == "multiple-choice":
snake_case__ : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case__ : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 374 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
UpperCamelCase : int = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(snake_case__ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCamelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(snake_case__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCamelCase : Dict = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(snake_case__ )
UpperCamelCase : List[Any] = []
for value in value_array:
UpperCamelCase : Optional[Any] = euclidean(snake_case__ , dataset[0] )
UpperCamelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCamelCase : Union[str, Any] = euclidean(snake_case__ , snake_case__ )
if dist > temp_dist:
UpperCamelCase : str = temp_dist
UpperCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> float:
return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 9 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__UpperCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__UpperCAmelCase = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__UpperCAmelCase = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__UpperCAmelCase = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__UpperCAmelCase = np.expand_dims(test_image, axis=0)
__UpperCAmelCase = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__UpperCAmelCase = '''Normal'''
if result[0][0] == 1:
__UpperCAmelCase = '''Abnormality detected'''
| 40 | 0 |
'''simple docstring'''
def lowerCamelCase__ ( a ):
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError('Input value must be an \'int\' type' )
__snake_case = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = '''us-east-1''' # defaults region
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : str
UpperCAmelCase__ : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
UpperCAmelCase__ : Union[str, Any] = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
UpperCAmelCase__ : Dict = {**hyperparameters, "max_steps": 1000}
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ) -> str:
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def UpperCamelCase ( snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 40 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE__ ( a__ ):
_UpperCAmelCase ="yolos"
def __init__( self: List[str] , a: int=7_68 , a: List[str]=12 , a: Dict=12 , a: Any=30_72 , a: List[Any]="gelu" , a: Any=0.0 , a: Dict=0.0 , a: Dict=0.02 , a: Dict=1e-12 , a: List[str]=[5_12, 8_64] , a: int=16 , a: List[str]=3 , a: Dict=True , a: List[Any]=1_00 , a: Union[str, Any]=True , a: Optional[Any]=False , a: Optional[int]=1 , a: str=5 , a: List[Any]=2 , a: int=5 , a: Any=2 , a: Union[str, Any]=0.1 , **a: Dict , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = num_detection_tokens
a_ = use_mid_position_embeddings
a_ = auxiliary_loss
# Hungarian matcher
a_ = class_cost
a_ = bbox_cost
a_ = giou_cost
# Loss coefficients
a_ = bbox_loss_coefficient
a_ = giou_loss_coefficient
a_ = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( a__ ):
_UpperCAmelCase =version.parse('''1.11''' )
@property
def _lowerCAmelCase ( self: Tuple) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def _lowerCAmelCase ( self: Union[str, Any]) ->float:
'''simple docstring'''
return 1e-4
@property
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
return 12
| 685 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__UpperCAmelCase = '''src/transformers'''
__UpperCAmelCase = '''docs/source/en/tasks'''
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
UpperCamelCase : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
UpperCamelCase : Optional[Any] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
__UpperCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__UpperCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide]
UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
UpperCamelCase : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( a__ ):
_UpperCAmelCase :Dict = ["image_processor", "tokenizer"]
_UpperCAmelCase :Optional[Any] = "BlipImageProcessor"
_UpperCAmelCase :Optional[Any] = "AutoTokenizer"
def __init__( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.image_processor
def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
UpperCamelCase : Union[str, Any] = self.tokenizer
UpperCamelCase : Optional[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
return text_encoding
# add pixel_values
UpperCamelCase : Dict = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
if text is not None:
UpperCamelCase : Any = self.tokenizer(
text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
else:
UpperCamelCase : Dict = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE_ )
return encoding_image_processor
def __UpperCamelCase( self , *A_ , **A_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def __UpperCamelCase( self , *A_ , **A_ ):
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.tokenizer.model_input_names
UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 629 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : int = IFPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ) -> str:
return self._get_dummy_components()
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' )
def snake_case_ ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case_ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
self._test_save_load_local()
def snake_case_ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def snake_case_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
# if
UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa )
UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase : int = None
UpperCamelCase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCamelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 40 | 0 |
"""simple docstring"""
from __future__ import annotations
A : Optional[Any] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] ) -> None:
"""simple docstring"""
UpperCamelCase__ = graph
# mapping node to its parent in resulting breadth first tree
UpperCamelCase__ = {}
UpperCamelCase__ = source_vertex
def lowerCamelCase__ ( self :Dict ) -> None:
"""simple docstring"""
UpperCamelCase__ = {self.source_vertex}
UpperCamelCase__ = None
UpperCamelCase__ = [self.source_vertex] # first in first out queue
while queue:
UpperCamelCase__ = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = vertex
queue.append(SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self :Tuple , lowerCamelCase_ :Optional[int] ) -> str:
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCamelCase__ = self.parent.get(SCREAMING_SNAKE_CASE_ )
if target_vertex_parent is None:
UpperCamelCase__ = (
f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
return self.shortest_path(SCREAMING_SNAKE_CASE_ ) + f'->{target_vertex}'
if __name__ == "__main__":
A : int = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 516 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase ( snake_case__ : Tuple="" ) -> str:
UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) )
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5
UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' )
sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 )
UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) )
UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = 'Hey!'
UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() )
self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _lowerCAmelCase ( a__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CpmAntTokenizer
snake_case_ = False
def lowerCAmelCase ( self : Tuple )-> List[str]:
super().setUp()
snake_case = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def lowerCAmelCase ( self : str )-> Tuple:
snake_case = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
snake_case = '今天天气真好!'
snake_case = ['今天', '天气', '真', '好', '!']
snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
snake_case = '今天天气真好!'
snake_case = [tokenizer.bos_token] + tokens
snake_case = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 369 |
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : Any ) -> Union[str, Any]:
UpperCamelCase : int = [1]
for i in range(2 , snake_case__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCamelCase : List[Any] = []
UpperCamelCase : List[Any] = list(range(snake_case__ ) )
# Find permutation
while factorials:
UpperCamelCase : int = factorials.pop()
UpperCamelCase , UpperCamelCase : int = divmod(snake_case__ , snake_case__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
from math import factorial
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCamelCase = real
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase = [1] * rank
else:
lowerCamelCase = rank
def __repr__( self : Tuple ) -> Any:
'''simple docstring'''
return (
F'''{self.real}+'''
F'''{"+".join(str(SCREAMING_SNAKE_CASE_ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def lowerCamelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
lowerCamelCase = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , SCREAMING_SNAKE_CASE_ )
def __add__( self : List[str] , __snake_case : Dict ) -> Union[str, Any]:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return Dual(self.real + other , self.duals )
lowerCamelCase = self.duals.copy()
lowerCamelCase = other.duals.copy()
if len(SCREAMING_SNAKE_CASE_ ) > len(SCREAMING_SNAKE_CASE_ ):
o_dual.extend([1] * (len(SCREAMING_SNAKE_CASE_ ) - len(SCREAMING_SNAKE_CASE_ )) )
elif len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ):
s_dual.extend([1] * (len(SCREAMING_SNAKE_CASE_ ) - len(SCREAMING_SNAKE_CASE_ )) )
lowerCamelCase = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , SCREAMING_SNAKE_CASE_ )
snake_case = __add__
def __sub__( self : str , __snake_case : Dict ) -> List[str]:
'''simple docstring'''
return self + other * -1
def __mul__( self : Optional[int] , __snake_case : List[str] ) -> List[Any]:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , SCREAMING_SNAKE_CASE_ )
lowerCamelCase = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , SCREAMING_SNAKE_CASE_ )
snake_case = __mul__
def __truediv__( self : str , __snake_case : Any ) -> Tuple:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , SCREAMING_SNAKE_CASE_ )
raise ValueError
def __floordiv__( self : List[Any] , __snake_case : List[Any] ) -> int:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , SCREAMING_SNAKE_CASE_ )
raise ValueError
def __pow__( self : Optional[int] , __snake_case : List[Any] ) -> List[str]:
'''simple docstring'''
if n < 0 or isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('power must be a positive integer' )
if n == 0:
return 1
if n == 1:
return self
lowerCamelCase = self
for _ in range(n - 1 ):
x *= self
return x
def a_ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict ) -> Optional[int]:
"""simple docstring"""
if not callable(snake_case__ ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(snake_case__ , (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError('differentiate() requires an int as input for order' )
lowerCamelCase = Dual(snake_case__ , 1 )
lowerCamelCase = func(snake_case__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def a_ ( UpperCamelCase_ : str ) -> Optional[Any]:
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2))
| 246 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( a__ ):
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'width_multiplier' ) )
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_="swish", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=0.25, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, ) -> Any:
UpperCamelCase : int = parent
UpperCamelCase : int = batch_size
UpperCamelCase : List[Any] = image_size
UpperCamelCase : List[str] = patch_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : List[str] = make_divisible(512 * width_multiplier, divisor=8 )
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : Optional[int] = conv_kernel_size
UpperCamelCase : List[str] = output_stride
UpperCamelCase : Union[str, Any] = classifier_dropout_prob
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Any = is_training
UpperCamelCase : int = num_labels
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Tuple = scope
UpperCamelCase : List[str] = width_multiplier
UpperCamelCase : Any = ffn_dropout
UpperCamelCase : List[Any] = attn_dropout
def snake_case_ ( self ) -> int:
UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[str] = None
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ) -> int:
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Any = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Optional[int] = self.num_labels
UpperCamelCase : Tuple = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Dict = MobileViTVaModelTester(self )
UpperCamelCase : Optional[Any] = MobileViTVaConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def snake_case_ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Tuple:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Tuple = outputs.hidden_states
UpperCamelCase : Dict = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase : Any = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) -> Tuple:
UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> str:
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : List[str] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
], device=SCREAMING_SNAKE_CASE_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : int = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = outputs.logits.detach().cpu()
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] )
UpperCamelCase : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
| 40 | 0 |
class A_ :
def __init__( self : List[Any] , snake_case__ : Tuple ):
lowercase = n
lowercase = [None] * self.n
lowercase = 0 # index of the first element
lowercase = 0
lowercase = 0
def __len__( self : Tuple ):
return self.size
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return self.size == 0
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
return False if self.is_empty() else self.array[self.front]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] ):
if self.size >= self.n:
raise Exception("""QUEUE IS FULL""" )
lowercase = data
lowercase = (self.rear + 1) % self.n
self.size += 1
return self
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
if self.size == 0:
raise Exception("""UNDERFLOW""" )
lowercase = self.array[self.front]
lowercase = None
lowercase = (self.front + 1) % self.n
self.size -= 1
return temp
| 428 |
def UpperCamelCase ( snake_case__ : Optional[int] ) -> str:
UpperCamelCase : List[str] = [0] * len(snake_case__ )
UpperCamelCase : int = []
UpperCamelCase : Optional[int] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCamelCase : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 40 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def UpperCamelCase_ ( A__ : Union[dict, list, tuple, torch.Tensor] ):
'''simple docstring'''
lowerCAmelCase_ : str = []
if isinstance(snake_case__ , snake_case__ ):
for v in tree.values():
shapes.extend(_fetch_dims(snake_case__ ) )
elif isinstance(snake_case__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(snake_case__ ) )
elif isinstance(snake_case__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def UpperCamelCase_ ( A__ : int , A__ : Tuple[int, ...] ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = []
for d in reversed(snake_case__ ):
idx.append(flat_idx % d )
lowerCAmelCase_ : Optional[Any] = flat_idx // d
return tuple(reversed(snake_case__ ) )
@torch.jit.ignore
def UpperCamelCase_ ( A__ : Sequence[int] , A__ : Sequence[int] , A__ : Sequence[int] , A__ : Optional[Sequence[bool]] = None , A__ : Optional[Sequence[bool]] = None , ):
'''simple docstring'''
def reduce_edge_list(A__ : List[bool] ) -> None:
lowerCAmelCase_ : Optional[Any] = True
for i in range(len(snake_case__ ) ):
lowerCAmelCase_ : int = -1 * (i + 1)
l[reversed_idx] &= tally
lowerCAmelCase_ : Optional[Any] = l[reversed_idx]
if start_edges is None:
lowerCAmelCase_ : Dict = [s == 0 for s in start]
reduce_edge_list(snake_case__ )
if end_edges is None:
lowerCAmelCase_ : int = [e == (d - 1) for e, d in zip(snake_case__ , snake_case__ )]
reduce_edge_list(snake_case__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case__ ) == 0:
return [()]
elif len(snake_case__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
lowerCAmelCase_ : List[Tuple[slice, ...]] = []
lowerCAmelCase_ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case__ , snake_case__ ):
if s == e:
path_list.append(slice(snake_case__ , s + 1 ) )
else:
break
lowerCAmelCase_ : Tuple[slice, ...] = tuple(snake_case__ )
lowerCAmelCase_ : str = len(snake_case__ )
# start == end, and we're done
if divergence_idx == len(snake_case__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCAmelCase_ : Optional[int] = start[divergence_idx]
return tuple(
path + (slice(snake_case__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCAmelCase_ : List[Any] = end[divergence_idx]
return tuple(
path + (slice(snake_case__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
lowerCAmelCase_ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def UpperCamelCase_ ( A__ : torch.Tensor , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = t.shape[:no_batch_dims]
lowerCAmelCase_ : str = list(_flat_idx_to_idx(snake_case__ , snake_case__ ) )
# _get_minimal_slice_set is inclusive
lowerCAmelCase_ : Optional[Any] = list(_flat_idx_to_idx(flat_end - 1 , snake_case__ ) )
# Get an ordered list of slices to perform
lowerCAmelCase_ : Union[str, Any] = _get_minimal_slice_set(
snake_case__ , snake_case__ , snake_case__ , )
lowerCAmelCase_ : Union[str, Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def UpperCamelCase_ ( A__ : Callable , A__ : Dict[str, Any] , A__ : int , A__ : int , A__ : bool = False , A__ : Any = None , A__ : bool = False , ):
'''simple docstring'''
if not (len(snake_case__ ) > 0):
raise ValueError("""Must provide at least one input""" )
lowerCAmelCase_ : Optional[int] = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case__ )]
lowerCAmelCase_ : Dict = tuple([max(snake_case__ ) for s in zip(*snake_case__ )] )
def _prep_inputs(A__ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
lowerCAmelCase_ : Tuple = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
lowerCAmelCase_ : List[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
lowerCAmelCase_ : Optional[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
lowerCAmelCase_ : Dict[str, Any] = tensor_tree_map(_prep_inputs , snake_case__ )
lowerCAmelCase_ : List[Any] = None
if _out is not None:
lowerCAmelCase_ : List[Any] = tensor_tree_map(lambda A__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
lowerCAmelCase_ : List[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
lowerCAmelCase_ : List[Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(A__ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : List[Any] = prepped_outputs
for _ in range(snake_case__ ):
# Chunk the input
if not low_mem:
lowerCAmelCase_ : Tuple = _select_chunk
else:
lowerCAmelCase_ : int = partial(
_chunk_slice , flat_start=snake_case__ , flat_end=min(snake_case__ , i + chunk_size ) , no_batch_dims=len(snake_case__ ) , )
lowerCAmelCase_ : Dict[str, Any] = tensor_tree_map(snake_case__ , snake_case__ )
# Run the layer on the chunk
lowerCAmelCase_ : List[Any] = layer(**snake_case__ )
# Allocate space for the output
if out is None:
lowerCAmelCase_ : str = tensor_tree_map(lambda A__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case__ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case__ , snake_case__ ):
def assign(A__ : dict , A__ : dict ) -> None:
for k, v in da.items():
if isinstance(snake_case__ , snake_case__ ):
assign(snake_case__ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
lowerCAmelCase_ : Optional[Any] = da[k]
assign(snake_case__ , snake_case__ )
elif isinstance(snake_case__ , snake_case__ ):
for xa, xa in zip(snake_case__ , snake_case__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
lowerCAmelCase_ : str = xa
elif isinstance(snake_case__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
lowerCAmelCase_ : Optional[Any] = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
lowerCAmelCase_ : Tuple = tensor_tree_map(lambda A__ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case__ )
return out
class __snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : Tuple = 5_12 , ) -> List[Any]:
lowerCAmelCase_ : int = max_chunk_size
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[tuple] = None
def __lowercase ( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : Dict ) -> int:
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
lowerCAmelCase_ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
lowerCAmelCase_ : Tuple = [c for c in candidates if c > min_chunk_size]
lowerCAmelCase_ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(lowerCamelCase : int ) -> bool:
try:
with torch.no_grad():
fn(*SCREAMING_SNAKE_CASE_ , chunk_size=SCREAMING_SNAKE_CASE_ )
return True
except RuntimeError:
return False
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : Dict = len(SCREAMING_SNAKE_CASE_ ) - 1
while i > min_viable_chunk_size_index:
lowerCAmelCase_ : Optional[Any] = test_chunk_size(candidates[i] )
if not viable:
lowerCAmelCase_ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
lowerCAmelCase_ : Union[str, Any] = i
lowerCAmelCase_ : str = (i + len(SCREAMING_SNAKE_CASE_ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __lowercase ( self : Dict , lowerCamelCase : Dict , lowerCamelCase : str ) -> bool:
lowerCAmelCase_ : Any = True
for aa, aa in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert type(SCREAMING_SNAKE_CASE_ ) == type(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : Optional[int] = [v for _, v in sorted(aa.items() , key=lambda lowerCamelCase : x[0] )]
lowerCAmelCase_ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda lowerCamelCase : x[0] )]
consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
consistent &= aa == aa
return consistent
def __lowercase ( self : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , ) -> int:
lowerCAmelCase_ : int = True
lowerCAmelCase_ : tuple = tree_map(lambda lowerCamelCase : a.shape if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) else a , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = self._compare_arg_caches(self.cached_arg_data , SCREAMING_SNAKE_CASE_ )
else:
# Otherwise, we can reuse the precomputed value
lowerCAmelCase_ : Tuple = False
if not consistent:
lowerCAmelCase_ : Union[str, Any] = self._determine_favorable_chunk_size(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : List[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 275 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> Dict: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __lowerCAmelCase ( ) -> Dict:
'''simple docstring'''
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
SCREAMING_SNAKE_CASE = [1, 2, 3]
with pytest.raises(snake_case__ ):
with parallel_backend('unsupported backend' ):
map_nested(snake_case__ , snake_case__ , num_proc=2 )
with pytest.raises(snake_case__ ):
with parallel_backend('unsupported backend' ):
map_nested(snake_case__ , snake_case__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [1, 2]
SCREAMING_SNAKE_CASE = {'a': 1, 'b': 2}
SCREAMING_SNAKE_CASE = {'a': [1, 2], 'b': [3, 4]}
SCREAMING_SNAKE_CASE = {'a': {'1': 1}, 'b': 2}
SCREAMING_SNAKE_CASE = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
SCREAMING_SNAKE_CASE = [2, 3]
SCREAMING_SNAKE_CASE = {'a': 2, 'b': 3}
SCREAMING_SNAKE_CASE = {'a': [2, 3], 'b': [4, 5]}
SCREAMING_SNAKE_CASE = {'a': {'1': 2}, 'b': 3}
SCREAMING_SNAKE_CASE = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) == expected_map_nested_sa
assert map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) == expected_map_nested_sa
assert map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) == expected_map_nested_sa
assert map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) == expected_map_nested_sa
assert map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) == expected_map_nested_sa
| 439 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
'''simple docstring'''
def __snake_case( _lowerCAmelCase ) -> str:
snake_case__ : List[str] = [0] * len(snake_case__ )
snake_case__ : int = []
snake_case__ : Optional[int] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
snake_case__ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case__ : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__a = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 374 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( a__ , unittest.TestCase ):
"""simple docstring"""
A__ : int = BlenderbotSmallTokenizer
A__ : Any = False
def _a ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
A__ = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
A__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
A__ = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
A__ = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def _a ( self : List[str] , **_snake_case : Optional[int] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _a ( self : Union[str, Any] , _snake_case : str ):
"""simple docstring"""
A__ = 'adapt act apte'
A__ = 'adapt act apte'
return input_text, output_text
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'adapt act apte'
A__ = ['adapt', 'act', 'ap@@', 'te']
A__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
A__ = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [13_84]
A__ = 'I am a small frog.'
A__ = tok([src_text] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['input_ids']
A__ = tok.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _a ( self : Dict ):
"""simple docstring"""
A__ = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
A__ = 'I am a small frog .'
A__ = '.'
A__ = tok(SCREAMING_SNAKE_CASE_ )['input_ids']
A__ = tok(SCREAMING_SNAKE_CASE_ )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 9 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ViTFeatureExtractor''']
__UpperCAmelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class a_ ( a__ ):
lowercase_ : Optional[Any] = "mgp-str"
def __init__( self : Any , __lowerCAmelCase : Dict=[3_2, 1_2_8] , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Dict=2_7 , __lowerCAmelCase : Any=3_8 , __lowerCAmelCase : Optional[Any]=5_0_2_5_7 , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Optional[int]=7_6_8 , __lowerCAmelCase : List[Any]=1_2 , __lowerCAmelCase : List[str]=1_2 , __lowerCAmelCase : List[str]=4.0 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Dict=False , __lowerCAmelCase : int=1E-5 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : int=False , __lowerCAmelCase : List[Any]=0.02 , **__lowerCAmelCase : List[Any] , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = max_token_length
__snake_case = num_character_labels
__snake_case = num_bpe_labels
__snake_case = num_wordpiece_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = mlp_ratio
__snake_case = distilled
__snake_case = layer_norm_eps
__snake_case = drop_rate
__snake_case = qkv_bias
__snake_case = attn_drop_rate
__snake_case = drop_path_rate
__snake_case = output_aa_attentions
__snake_case = initializer_range
| 356 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
| 40 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = ['model.decoder.embed_positions.weights']
def __UpperCAmelCase (lowercase__ ) -> Tuple:
'''simple docstring'''
if "emb" in name:
a_ = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
a_ = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
a_ = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
a_ = name.replace("linear1" ,"fc1" )
if "linear2" in name:
a_ = name.replace("linear2" ,"fc2" )
if "norm1" in name:
a_ = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
a_ = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
a_ = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
a_ = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
a_ = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
a_ = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple[Dict, Dict]:
'''simple docstring'''
a_ = list(state_dict.keys() )
a_ = {}
for key in keys:
a_ = state_dict.pop(snake_case__ )
a_ = rename_keys(snake_case__ )
if "in_proj_weight" in key:
# split fused qkv proj
a_ = val[:hidden_size, :]
a_ = val[hidden_size : 2 * hidden_size, :]
a_ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
a_ = val
else:
a_ = val
return state_dict, enc_dec_proj_state_dict
def __UpperCAmelCase (lowercase__ ) -> MusicgenDecoderConfig:
'''simple docstring'''
if checkpoint == "small":
# default config values
a_ = 1024
a_ = 24
a_ = 16
elif checkpoint == "medium":
a_ = 1536
a_ = 48
a_ = 24
elif checkpoint == "large":
a_ = 2048
a_ = 48
a_ = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
a_ = MusicgenDecoderConfig(
hidden_size=snake_case__ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=snake_case__ ,num_attention_heads=snake_case__ ,)
return config
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__=None ,lowercase__=None ,lowercase__="cpu" ) -> List[str]:
'''simple docstring'''
a_ = MusicGen.get_pretrained(snake_case__ ,device=snake_case__ )
a_ = decoder_config_from_checkpoint(snake_case__ )
a_ = fairseq_model.lm.state_dict()
a_ = rename_state_dict(
snake_case__ ,hidden_size=decoder_config.hidden_size )
a_ = TaEncoderModel.from_pretrained("t5-base" )
a_ = EncodecModel.from_pretrained("facebook/encodec_32khz" )
a_ = MusicgenForCausalLM(snake_case__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
a_ = decoder.load_state_dict(snake_case__ ,strict=snake_case__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(snake_case__ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
a_ = MusicgenForConditionalGeneration(text_encoder=snake_case__ ,audio_encoder=snake_case__ ,decoder=snake_case__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(snake_case__ )
# check we can do a forward pass
a_ = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
a_ = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
a_ = model(input_ids=snake_case__ ,decoder_input_ids=snake_case__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
a_ = AutoTokenizer.from_pretrained("t5-base" )
a_ = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
a_ = MusicgenProcessor(feature_extractor=snake_case__ ,tokenizer=snake_case__ )
# set the appropriate bos/pad token ids
a_ = 2048
a_ = 2048
# set other default generation config params
a_ = int(30 * audio_encoder.config.frame_rate )
a_ = True
a_ = 3.0
if pytorch_dump_folder is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(snake_case__ )
processor.push_to_hub(snake_case__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
a_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 685 |
def UpperCamelCase ( snake_case__ : int ) -> str:
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
UpperCamelCase : int = False
if num < 0:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Tuple = -num
UpperCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.