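# Tests for diffusers' ConsistencyModelPipeline: fast CPU checks against tiny
# dummy checkpoints and slow GPU checks against the ImageNet-64 consistency
# distillation (CD) checkpoint.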
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
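    # `timesteps=[22, 0]` above drives the two-step (multistep) sampling path;
    # the onestep tests below override it with `num_inference_steps=1` and
    # `timesteps=None`.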
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if isinstance(device, str):
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)

        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
def solution(length: int = 50) -> int:
    # different_colour_ways_number[row_length][tile_length - 2] counts the
    # arrangements of a row of `row_length` units that use at least one tile
    # of length `tile_length` (tile lengths 2, 3 and 4).
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][tile_length - 2]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune the branch when the running sum already overshoots max_sum, or
    # when even taking every remaining number could not reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
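# Prints the subsets of [3, 34, 4, 12, 5, 2] that sum to 9: [3, 4, 2] [4, 5]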
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
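# Example invocation (script and paths shown are illustrative placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch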
# This file is autogenerated by the command `make fix-copies`, do not edit.
# NOTE: the concrete class names below are restored from diffusers'
# dummy_flax_objects.py; the obfuscated dump did not preserve them.
from ..utils import DummyObject, requires_backends


class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
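# These placeholders keep `from diffusers import FlaxX` importable on
# installs without JAX/Flax: instantiating a class or calling from_config /
# from_pretrained raises an informative error via requires_backends instead
# of failing at import time.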
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
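# With _LazyModule, importing this module stays cheap: the torch/TF/Flax
# submodules registered above are imported only when one of their symbols is
# first accessed, while the TYPE_CHECKING branch keeps the full API visible
# to static analyzers and IDEs.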
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a continuous function is guaranteed a root in (a, b)
    # only if f(a) and f(b) have opposite signs.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
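# Both demo calls bracket the positive root of 10 - x**2, so each prints an
# approximation of sqrt(10) ~= 3.1623 (accurate to the 0.01 interval width).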
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
import sys


# Function name chosen descriptively; the dump did not preserve the original.
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the minimum count of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        # Try every square j**2 <= i and keep the best decomposition of the rest.
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
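# By Lagrange's four-square theorem every natural number is a sum of at most
# four perfect squares, so the value returned for n >= 1 never exceeds 4.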
from manim import *


# Scene and variable names below are reconstructions; the dump did not
# preserve the originals. The animation draws CPU, GPU and model memory
# blocks and moves "empty model" cells into CPU RAM.
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])

        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
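# Rendered with manim's CLI, e.g.: manim -pql stage_1.py Stage1
# (file and scene names here are assumptions; the dump did not preserve them).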
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _UpperCamelCase ( _A , _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[str] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    # special case for ForPreTraining model; assumes `get_values`,
    # `MODEL_FOR_PRETRAINING_MAPPING` and `torch_device` are imported above.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("""Model is not available.""" )
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_processor, decoded_tok)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 670 | 1 |
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # relax every edge vertex_count - 1 times
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
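
# Worked example (illustrative, not part of the original module). For the edges
# 0 -(2)-> 1 and 1 -(3)-> 2:
#
#     example_graph = [
#         {"src": 0, "dst": 1, "weight": 2},
#         {"src": 1, "dst": 2, "weight": 3},
#     ]
#     bellman_ford(example_graph, 3, 2, 0)  # -> [0.0, 2.0, 5.0]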
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 700 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 670 | 0 |
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
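
# Illustrative example: res(3, 4) returns 4 * log10(3) ≈ 1.9085. Because log10 is
# monotonic, comparing res(x1, y1) and res(x2, y2) preserves the ordering of
# x1**y1 and x2**y2, which is what the main block below relies on.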
if __name__ == "__main__": # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 701 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    # The SpeechT5HifiGan submodules targeted below (conv_pre, upsampler,
    # resblocks, conv_post) mirror the checkpoint key layout.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
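
# Note on the weight-norm dance above: apply_weight_norm() re-creates the
# weight_g / weight_v parametrization so the normalized tensors stored in the
# original checkpoint can be copied one-to-one, and remove_weight_norm() then
# folds them back into plain weights for inference.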
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
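
# Example invocation (file name and paths are placeholders; the flags match the
# argparse definitions below):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan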
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()

    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 670 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Any = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_0478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
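
# Minimal usage sketch (not part of the original module): attribute_map lets the
# canonical config names resolve to the GPT-style ones, e.g.:
#
#     config = OpenAIGPTConfig(n_embd=768)
#     assert config.hidden_size == 768  # resolved through attribute_map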
| 702 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 0 |
from manim import *
class _UpperCamelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(6 )]
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: int = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase_: Optional[Any] = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase_: str = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
UpperCamelCase_: Any = Text("""CPU""" , font_size=24 )
UpperCamelCase_: Any = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
UpperCamelCase_: List[str] = [mem.copy() for i in range(4 )]
UpperCamelCase_: Dict = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase_: Union[str, Any] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
UpperCamelCase_: List[Any] = [mem.copy() for i in range(6 )]
UpperCamelCase_: str = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase_: Dict = Text("""Model""" , font_size=24 )
UpperCamelCase_: Union[str, Any] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
UpperCamelCase_: Any = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCamelCase_: Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=A_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=A_ , buff=0.0 )
self.add(A_ )
cpu_targs.append(A_ )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: int = VGroup(*A_ ).arrange(A_ , buff=0 )
UpperCamelCase_: List[Any] = Text("""Loaded Checkpoint""" , font_size=24 )
UpperCamelCase_: Union[str, Any] = Group(A_ , A_ ).arrange(A_ , aligned_edge=A_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCamelCase_: int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: int = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
UpperCamelCase_: List[Any] = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCamelCase_: List[str] = MarkupText(
f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) , Write(A_ ) )
self.play(Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
UpperCamelCase_: Dict = []
UpperCamelCase_: List[Any] = []
for i, rect in enumerate(A_ ):
UpperCamelCase_: Union[str, Any] = fill.copy().set_fill(A_ , opacity=0.7 )
target.move_to(A_ )
first_animations.append(GrowFromCenter(A_ , run_time=1 ) )
UpperCamelCase_: List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(A_ , run_time=1.5 ) )
self.play(*A_ )
self.play(*A_ )
self.wait()
| 703 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : neighbourhood size
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # hardcoded response constant (shadows self.k)
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
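
# Background note: for each pixel, the window sums wxx, wyy and wxy form the
# structure tensor M; the Harris response is r = det(M) - k * trace(M)**2. A
# large positive r indicates a corner, a large negative r an edge, and |r|
# near 0 a flat region.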
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 670 | 0 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
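
# Worked example (illustrative): for 100 VA of apparent power at power factor 0.9,
#   real_power(100, 0.9)      -> 90.0 (watts)
#   reactive_power(100, 0.9)  -> 100 * sqrt(1 - 0.81) ≈ 43.59 (volt-ampere reactive)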
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 704 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)

                if not directed:
                    # if the graph is undirected, add an edge from j to i, too
                    graph[j].append(i)

    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
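
# Illustrative usage (edge sets depend on the RNG state, so only complete_graph
# has a deterministic result):
#
#     random.seed(1)
#     g = random_graph(4, 0.5)   # undirected: j in g[i] implies i in g[j]
#     complete_graph(3)          # -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}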
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 670 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
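
# Hedged usage sketch (adapted from the companion KarrasVe pipeline; the model
# call convention below is an assumption, not defined in this file):
#
#     scheduler.set_timesteps(num_inference_steps)
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#         model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
#         step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#         sample = step_output.prev_sample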
| 705 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
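
# Note (added for clarity): CaptureLogger from transformers.testing_utils is a
# context manager that buffers everything the wrapped logger emits into `cl.out`,
# which is what the assertions above compare against.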
| 670 | 0 |
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head for transformer encoder hidden states."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
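
# Hedged usage sketch (requires `import torch`; shapes are illustrative):
#
#     head = ClassificationHead(class_size=5, embed_size=768)
#     logits = head(torch.randn(2, 768))  # -> tensor of shape (2, 5)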
| 706 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 670 | 0 |
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCamelCase_ : Optional[int] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
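
# Hedged usage sketch (interactive; mirrors how accelerate's `config` command
# drives these helpers):
#
#     mixed_precision = _ask_options(
#         "Do you wish to use FP16 or BF16 (mixed precision)?",
#         ["no", "fp16", "bf16", "fp8"],
#         _convert_mixed_precision,
#     )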
| 707 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 670 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_1122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_1122)
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # the mean/std below are the standard CLIP image normalization statistics
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase_ : int = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
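# Hedged sketch of the rename-and-reload pattern repeated for each head
# above: pop every key from the source state dict, map it to the HF naming
# scheme, and reinsert it before calling load_state_dict. `_rename_state_dict`
# and the lambda below are illustrative, not the script's real rename_key.
def _rename_state_dict(state_dict, rename_key):
    for key in list(state_dict):
        state_dict[rename_key(key)] = state_dict.pop(key)
    return state_dict

_demo = _rename_state_dict(
    {"visual_encoder.blocks.0.norm1.weight": 1.0},
    lambda k: k.replace("visual_encoder", "vision_model"),
)
assert "vision_model.blocks.0.norm1.weight" in _demo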
| 709 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
while second != 0:
UpperCamelCase_: Optional[Any] = first & second
first ^= second
UpperCamelCase_: Any = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : List[Any] = int(input("""Enter the first number: """).strip())
lowerCamelCase_ : Tuple = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 670 | 0 |
from torch import nn
def A__ ( lowerCamelCase ) -> List[Any]:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
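# Hedged usage sketch of the factory above (its real name was mangled to
# ``A__`` in this dump); it maps config strings to torch activation modules,
# reusing the ``nn`` import at the top of the file:
for _name, _cls in (("silu", nn.SiLU), ("swish", nn.SiLU), ("mish", nn.Mish), ("gelu", nn.GELU)):
    assert isinstance(A__(_name), _cls)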
| 710 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
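# Hedged shape-only sketch of the multiple-choice collation trick used by the
# data collator above: each example carries num_choices tokenized candidates,
# which are flattened to (batch * num_choices) rows so the tokenizer can pad
# them together, then reshaped back to (batch, num_choices, seq_len).
import torch as _torch

_bsz, _choices, _seq = 2, 4, 7
_flat = _torch.zeros(_bsz * _choices, _seq, dtype=_torch.long)  # padded rows
_unflat = _flat.view(_bsz, _choices, -1)                        # un-flatten
assert _unflat.shape == (_bsz, _choices, _seq)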
| 670 | 0 |
from __future__ import annotations
lowerCamelCase_ : Optional[Any] = tuple[int, int, int]
lowerCamelCase_ : str = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
lowerCamelCase_ : List[str] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
lowerCamelCase_ : Union[str, Any] = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
lowerCamelCase_ : Dict = 'FOBHMDKEXQNRAULPGSJVTYICZW'
lowerCamelCase_ : Optional[int] = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
lowerCamelCase_ : Any = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
lowerCamelCase_ : List[str] = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
lowerCamelCase_ : Any = 'SGLCPQWZHKXAREONTFBVIYJUDM'
lowerCamelCase_ : int = 'HVSICLTYKQUBXDWAJZOMFGPREN'
lowerCamelCase_ : Tuple = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
lowerCamelCase_ : str = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
lowerCamelCase_ : str = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if (unique_rotsel := len(set(_lowerCAmelCase ) )) < 3:
UpperCamelCase_: List[str] = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(_lowerCAmelCase )
# Checks if rotor positions are valid
UpperCamelCase_: int = rotpos
if not 0 < rotorposa <= len(_lowerCAmelCase ):
        UpperCamelCase_: Optional[Any] = F'''First rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(_lowerCAmelCase )
if not 0 < rotorposa <= len(_lowerCAmelCase ):
UpperCamelCase_: Union[str, Any] = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(_lowerCAmelCase )
if not 0 < rotorposa <= len(_lowerCAmelCase ):
UpperCamelCase_: Optional[int] = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(_lowerCAmelCase )
# Validates string and returns dict
UpperCamelCase_: str = _plugboard(_lowerCAmelCase )
return rotpos, rotsel, pbdict
def A__ ( lowerCamelCase ) -> List[str]:
'''simple docstring'''
    if not isinstance(_lowerCAmelCase , str ):
UpperCamelCase_: List[str] = F'''Plugboard setting isn\'t type string ({type(_lowerCAmelCase )})'''
raise TypeError(_lowerCAmelCase )
elif len(_lowerCAmelCase ) % 2 != 0:
UpperCamelCase_: Optional[int] = F'''Odd number of symbols ({len(_lowerCAmelCase )})'''
raise Exception(_lowerCAmelCase )
elif pbstring == "":
return {}
pbstring.replace(""" """ , """""" )
# Checks if all characters are unique
UpperCamelCase_: Union[str, Any] = set()
for i in pbstring:
if i not in abc:
UpperCamelCase_: List[Any] = F'''\'{i}\' not in list of symbols'''
raise Exception(_lowerCAmelCase )
elif i in tmppbl:
UpperCamelCase_: Optional[int] = F'''Duplicate symbol ({i})'''
raise Exception(_lowerCAmelCase )
else:
tmppbl.add(_lowerCAmelCase )
del tmppbl
# Created the dictionary
UpperCamelCase_: List[Any] = {}
for j in range(0 , len(_lowerCAmelCase ) - 1 , 2 ):
UpperCamelCase_: List[Any] = pbstring[j + 1]
UpperCamelCase_: Union[str, Any] = pbstring[j]
return pb
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = (rotora, rotora, rotora) , lowerCamelCase = "" , ) -> Dict:
'''simple docstring'''
UpperCamelCase_: Optional[int] = text.upper()
UpperCamelCase_: Union[str, Any] = _validator(
_lowerCAmelCase , _lowerCAmelCase , plugb.upper() )
UpperCamelCase_: Tuple = rotor_position
UpperCamelCase_: Tuple = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
UpperCamelCase_: List[Any] = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
UpperCamelCase_: int = plugboard[symbol]
# rotor ra --------------------------
UpperCamelCase_: int = abc.index(_lowerCAmelCase ) + rotorposa
UpperCamelCase_: Dict = rotora[index % len(_lowerCAmelCase )]
# rotor rb --------------------------
UpperCamelCase_: int = abc.index(_lowerCAmelCase ) + rotorposa
UpperCamelCase_: Union[str, Any] = rotora[index % len(_lowerCAmelCase )]
# rotor rc --------------------------
UpperCamelCase_: List[str] = abc.index(_lowerCAmelCase ) + rotorposa
UpperCamelCase_: int = rotora[index % len(_lowerCAmelCase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
UpperCamelCase_: List[str] = reflector[symbol]
# 2nd rotors
UpperCamelCase_: List[str] = abc[rotora.index(_lowerCAmelCase ) - rotorposa]
UpperCamelCase_: Any = abc[rotora.index(_lowerCAmelCase ) - rotorposa]
UpperCamelCase_: List[Any] = abc[rotora.index(_lowerCAmelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
UpperCamelCase_: List[str] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowerCAmelCase ):
UpperCamelCase_: List[str] = 0
rotorposa += 1
if rotorposa >= len(_lowerCAmelCase ):
UpperCamelCase_: str = 0
rotorposa += 1
if rotorposa >= len(_lowerCAmelCase ):
UpperCamelCase_: str = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase_ : int = 'This is my Python script that emulates the Enigma machine from WWII.'
lowerCamelCase_ : Union[str, Any] = (1, 1, 1)
lowerCamelCase_ : int = 'pictures'
lowerCamelCase_ : Tuple = (rotora, rotora, rotora)
lowerCamelCase_ : Optional[Any] = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 711 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 0 |
def A__ ( lowerCamelCase ) -> int:
if a < 0:
raise ValueError("""Input value must be a positive integer""" )
    elif not isinstance(__lowerCAmelCase , int ):
raise TypeError("""Input value must be a 'int' type""" )
return bin(__lowerCAmelCase ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
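# Hedged alternative sketch: Brian Kernighan's method clears the lowest set
# bit each iteration, so it runs in O(number of set bits) rather than over
# the whole string produced by bin(). Like the guard above, it assumes a
# non-negative integer input.
def _popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drop the lowest set bit
        count += 1
    return count

assert _popcount_kernighan(0b101101) == 4 == bin(0b101101).count("1")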
| 712 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
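# Hedged standalone usage sketch (the classes above lost their real names in
# this dump, so a tiny illustrative doubly linked list is used instead): the
# core invariant is that push-front rewires head.prev and, for the very
# first node, tail as well.
class _DNode:
    def __init__(self, data):
        self.data, self.prev, self.next = data, None, None

class _DList:
    def __init__(self):
        self.head = self.tail = None

    def push_front(self, data):
        node = _DNode(data)
        node.next = self.head
        if self.head:
            self.head.prev = node
        else:
            self.tail = node  # first node is both head and tail
        self.head = node

    def __iter__(self):
        cur = self.head
        while cur:
            yield cur.data
            cur = cur.next

_dl = _DList()
for _x in (3, 2, 1):
    _dl.push_front(_x)
assert list(_dl) == [1, 2, 3]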
| 670 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _UpperCamelCase ( _snake_case ):
__UpperCamelCase : torch.FloatTensor
__UpperCamelCase : Optional[torch.FloatTensor] = None
def A__ ( lowerCamelCase , lowerCamelCase=0.999 , lowerCamelCase="cosine" , ) -> str:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
UpperCamelCase_: int = []
for i in range(__UpperCAmelCase ):
UpperCamelCase_: Optional[Any] = i / num_diffusion_timesteps
UpperCamelCase_: List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
class _UpperCamelCase ( _snake_case , _snake_case ):
@register_to_config
def __init__( self : Optional[int] , snake_case_ : int = 1000 , snake_case_ : str = "fixed_small_log" , snake_case_ : bool = True , snake_case_ : Optional[float] = 1.0 , snake_case_ : str = "epsilon" , snake_case_ : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
UpperCamelCase_: Optional[Any] = betas_for_alpha_bar(snake_case_ )
UpperCamelCase_: Union[str, Any] = 1.0 - self.betas
UpperCamelCase_: Optional[int] = torch.cumprod(self.alphas , dim=0 )
UpperCamelCase_: Tuple = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCamelCase_: Optional[Any] = 1.0
# setable values
UpperCamelCase_: Optional[Any] = None
UpperCamelCase_: Dict = torch.from_numpy(np.arange(0 , snake_case_ )[::-1].copy() )
UpperCamelCase_: int = variance_type
def lowerCAmelCase__ ( self : List[str] , snake_case_ : torch.FloatTensor , snake_case_ : Optional[int] = None ):
return sample
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int , snake_case_ : Union[str, torch.device] = None ):
UpperCamelCase_: List[Any] = num_inference_steps
UpperCamelCase_: Optional[int] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCamelCase_: Union[str, Any] = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCamelCase_: Optional[int] = torch.from_numpy(snake_case_ ).to(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Any , snake_case_ : List[str]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=None ):
if prev_timestep is None:
UpperCamelCase_: List[Any] = t - 1
UpperCamelCase_: List[str] = self.alphas_cumprod[t]
UpperCamelCase_: Any = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase_: Optional[int] = 1 - alpha_prod_t
UpperCamelCase_: Optional[int] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase_: Dict = self.betas[t]
else:
UpperCamelCase_: List[str] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase_: Any = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCamelCase_: int = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCamelCase_: Union[str, Any] = torch.log(torch.clamp(snake_case_ , min=1e-20 ) )
UpperCamelCase_: Tuple = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCamelCase_: Any = variance.log()
UpperCamelCase_: int = beta.log()
UpperCamelCase_: str = (predicted_variance + 1) / 2
UpperCamelCase_: List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : torch.FloatTensor , snake_case_ : int , snake_case_ : torch.FloatTensor , snake_case_ : Optional[int] = None , snake_case_ : List[str]=None , snake_case_ : bool = True , ):
UpperCamelCase_: List[str] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCamelCase_, UpperCamelCase_: List[str] = torch.split(snake_case_ , sample.shape[1] , dim=1 )
else:
UpperCamelCase_: Union[str, Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCamelCase_: List[Any] = t - 1
UpperCamelCase_: Optional[int] = self.alphas_cumprod[t]
UpperCamelCase_: List[str] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase_: Optional[Any] = 1 - alpha_prod_t
UpperCamelCase_: List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase_: Union[str, Any] = self.betas[t]
UpperCamelCase_: str = self.alphas[t]
else:
UpperCamelCase_: Dict = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCamelCase_: Optional[int] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase_: Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase_: List[str] = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
""" for the UnCLIPScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase_: str = torch.clamp(
snake_case_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase_: Optional[int] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCamelCase_: int = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase_: Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCamelCase_: Union[str, Any] = 0
if t > 0:
UpperCamelCase_: Optional[int] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=snake_case_ , device=model_output.device )
UpperCamelCase_: Optional[int] = self._get_variance(
snake_case_ , predicted_variance=snake_case_ , prev_timestep=snake_case_ , )
if self.variance_type == "fixed_small_log":
UpperCamelCase_: Optional[int] = variance
elif self.variance_type == "learned_range":
UpperCamelCase_: int = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
""" for the UnCLIPScheduler.""" )
UpperCamelCase_: Any = variance * variance_noise
UpperCamelCase_: Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=snake_case_ , pred_original_sample=snake_case_ )
def lowerCAmelCase__ ( self : str , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , snake_case_ : torch.IntTensor , ):
UpperCamelCase_: int = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCamelCase_: int = timesteps.to(original_samples.device )
UpperCamelCase_: int = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase_: Optional[int] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase_: Optional[int] = sqrt_alpha_prod.unsqueeze(-1 )
UpperCamelCase_: Dict = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase_: Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase_: Optional[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCamelCase_: Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
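# Hedged numeric sketch of the posterior coefficients computed in `step`
# above (formula (7) of https://arxiv.org/pdf/2006.11239.pdf): the previous
# sample's mean is a linear combination of the predicted x_0 and the current
# x_t, with weights determined only by the cumulative alphas. The linear beta
# schedule below is illustrative, not the squaredcos_cap_v2 schedule.
import torch as _torch

_betas = _torch.linspace(1e-4, 2e-2, 10)
_alphas_cumprod = _torch.cumprod(1.0 - _betas, dim=0)
_t = 5
_ap_t, _ap_prev = _alphas_cumprod[_t], _alphas_cumprod[_t - 1]
_beta_t = 1 - _ap_t / _ap_prev
_coeff_x0 = _ap_prev.sqrt() * _beta_t / (1 - _ap_t)           # weight on x_0
_coeff_xt = (1 - _beta_t).sqrt() * (1 - _ap_prev) / (1 - _ap_t)  # weight on x_t
_variance = (1 - _ap_prev) / (1 - _ap_t) * _beta_t            # posterior variance
print(float(_coeff_x0), float(_coeff_xt), float(_variance))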
| 713 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 0 |
import os
from datetime import datetime as dt
from github import Github
lowerCamelCase_ : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def A__ ( ) -> List[Any]:
UpperCamelCase_: Optional[Any] = Github(os.environ["""GITHUB_TOKEN"""] )
UpperCamelCase_: Any = g.get_repo("""huggingface/diffusers""" )
UpperCamelCase_: Any = repo.get_issues(state="""open""" )
for issue in open_issues:
UpperCamelCase_: str = sorted(issue.get_comments() , key=lambda lowerCamelCase : i.created_at , reverse=lowerCAmelCase__ )
UpperCamelCase_: Union[str, Any] = comments[0] if len(lowerCAmelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 714 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 0 |
import warnings
from .generation import TFGenerationMixin
class _UpperCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , __lowerCAmelCase , )
| 715 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 0 |
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_lowerCamelCase ) )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
if index == len(_lowerCamelCase ):
return True
# Recursive Step
for i in range(_lowerCamelCase ):
if valid_coloring(graph[index] , _lowerCamelCase , _lowerCamelCase ):
# Color current vertex
UpperCamelCase_: Any = i
# Validate coloring
if util_color(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , index + 1 ):
return True
# Backtrack
UpperCamelCase_: int = -1
return False
def A__ ( lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
UpperCamelCase_: int = [-1] * len(_lowerCamelCase )
if util_color(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , 0 ):
return colored_vertices
return []
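# Hedged brute-force contract check for the backtracking colouring above
# (which expects an adjacency-matrix graph): the 5-cycle C5 admits a proper
# 3-colouring but no 2-colouring, since odd cycles are not bipartite.
from itertools import product as _product

_C5 = [
    [0, 1, 0, 0, 1],
    [1, 0, 1, 0, 0],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [1, 0, 0, 1, 0],
]

def _is_proper(graph, colours):
    return all(
        not (graph[u][v] and colours[u] == colours[v])
        for u in range(len(graph))
        for v in range(len(graph))
    )

assert any(_is_proper(_C5, c) for c in _product(range(3), repeat=5))
assert not any(_is_proper(_C5, c) for c in _product(range(2), repeat=5))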
| 716 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 670 | 0 |
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
UpperCamelCase_: int = mf_knapsack(i - 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
UpperCamelCase_: Any = max(
mf_knapsack(i - 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , mf_knapsack(i - 1 , _lowerCamelCase , _lowerCamelCase , j - wt[i - 1] ) + val[i - 1] , )
UpperCamelCase_: List[Any] = val
return f[i][j]
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
UpperCamelCase_: str = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
UpperCamelCase_: Union[str, Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
UpperCamelCase_: Any = dp[i - 1][w_]
return dp[n][w_], dp
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
if not (isinstance(_lowerCamelCase , (list, tuple) ) and isinstance(_lowerCamelCase , (list, tuple) )):
raise ValueError(
"""Both the weights and values vectors must be either lists or tuples""" )
UpperCamelCase_: List[str] = len(_lowerCamelCase )
if num_items != len(_lowerCamelCase ):
UpperCamelCase_: List[Any] = (
"The number of weights must be the same as the number of values.\n"
F'''But got {num_items} weights and {len(_lowerCamelCase )} values'''
)
raise ValueError(_lowerCamelCase )
for i in range(_lowerCamelCase ):
        if not isinstance(wt[i] , int ):
UpperCamelCase_: str = (
"All weights must be integers but got weight of "
F'''type {type(wt[i] )} at index {i}'''
)
raise TypeError(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = knapsack(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: set = set()
_construct_solution(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return optimal_val, example_optional_set
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(_lowerCamelCase , _lowerCamelCase , i - 1 , _lowerCamelCase , _lowerCamelCase )
else:
optimal_set.add(_lowerCamelCase )
_construct_solution(_lowerCamelCase , _lowerCamelCase , i - 1 , j - wt[i - 1] , _lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : List[str] = [3, 2, 4, 4]
lowerCamelCase_ : List[Any] = [4, 3, 2, 3]
lowerCamelCase_ : Tuple = 4
lowerCamelCase_ : Optional[int] = 6
lowerCamelCase_ : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowerCamelCase_ , lowerCamelCase_ : Tuple = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowerCamelCase_ , lowerCamelCase_ : str = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
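# Hedged sketch (not part of the module above) of what the _LazyModule swap in
# sys.modules achieves: attribute access triggers the real import on first use,
# so `import <package>` stays cheap even when heavy backends are installed.
#
#     import importlib, types
#
#     class _LazyModuleSketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(module, attr)
#             raise AttributeError(attr)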
| 670 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , snake_case_ : Dict , snake_case_ : List[Any]=7 , snake_case_ : str=3 , snake_case_ : Optional[Any]=30 , snake_case_ : Dict=400 , snake_case_ : Any=True , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]=True , snake_case_ : List[Any]=[0.5, 0.5, 0.5] , snake_case_ : List[str]=[0.5, 0.5, 0.5] , snake_case_ : List[Any]=True , snake_case_ : Tuple=1 / 255 , snake_case_ : Any=True , ):
UpperCamelCase_: Optional[int] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase_: Tuple = parent
UpperCamelCase_: Union[str, Any] = batch_size
UpperCamelCase_: str = num_channels
UpperCamelCase_: Union[str, Any] = min_resolution
UpperCamelCase_: List[Any] = max_resolution
UpperCamelCase_: List[Any] = do_resize
UpperCamelCase_: List[Any] = size
UpperCamelCase_: Optional[int] = do_normalize
UpperCamelCase_: Dict = image_mean
UpperCamelCase_: List[str] = image_std
UpperCamelCase_: Optional[Any] = do_rescale
UpperCamelCase_: Optional[Any] = rescale_factor
UpperCamelCase_: Optional[int] = do_pad
def lowerCAmelCase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any]=False ):
if not batched:
UpperCamelCase_: List[Any] = image_inputs[0]
if isinstance(lowerCamelCase_ , Image.Image ):
UpperCamelCase_: Any = image.size
else:
UpperCamelCase_: List[str] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase_: str = int(self.size["""shortest_edge"""] * h / w )
UpperCamelCase_: Optional[int] = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase_: Tuple = self.size['''shortest_edge''']
UpperCamelCase_: Union[str, Any] = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCamelCase_: Optional[Any] = self.size['''shortest_edge''']
UpperCamelCase_: Union[str, Any] = self.size['''shortest_edge''']
else:
UpperCamelCase_: Dict = []
for image in image_inputs:
UpperCamelCase_: Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase_: Union[str, Any] = max(lowerCamelCase_ , key=lambda snake_case_ : item[0] )[0]
UpperCamelCase_: List[str] = max(lowerCamelCase_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
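# (The branching above reproduces DETR-style resizing: the shorter image side is
# scaled to size["shortest_edge"] while the other side keeps the aspect ratio, and
# for batched inputs the expected height/width are the per-batch maxima, matching
# the padding behavior exercised by the tests below.)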
@require_torch
@require_vision
class _UpperCamelCase ( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: List[str] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase_ )
UpperCamelCase_: List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowerCamelCase_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
pass
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase_: str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_: List[str] = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_: Tuple = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
UpperCamelCase_: List[Any] = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_: Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase_: List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_: str = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_: int = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
UpperCamelCase_: Tuple = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase_: str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_: Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_: str = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
UpperCamelCase_: Optional[int] = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
UpperCamelCase_: Optional[int] = json.loads(f.read() )
UpperCamelCase_: int = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
UpperCamelCase_: Any = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
UpperCamelCase_: List[str] = image_processing(images=lowerCamelCase_ , annotations=lowerCamelCase_ , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase_: List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCamelCase_ )
UpperCamelCase_: Any = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase_: Dict = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCamelCase_ ) )
# verify boxes
UpperCamelCase_: List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCamelCase_ )
UpperCamelCase_: Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase_: Dict = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCamelCase_ ) )
# verify is_crowd
UpperCamelCase_: Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCamelCase_ ) )
# verify class_labels
UpperCamelCase_: List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCamelCase_ ) )
# verify orig_size
UpperCamelCase_: int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCamelCase_ ) )
# verify size
UpperCamelCase_: List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
UpperCamelCase_: List[Any] = json.loads(f.read() )
UpperCamelCase_: List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
UpperCamelCase_: List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
UpperCamelCase_: int = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
UpperCamelCase_: str = image_processing(images=lowerCamelCase_ , annotations=lowerCamelCase_ , masks_path=lowerCamelCase_ , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase_: Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCamelCase_ )
UpperCamelCase_: Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase_: Tuple = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCamelCase_ ) )
# verify boxes
UpperCamelCase_: Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCamelCase_ )
UpperCamelCase_: Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase_: Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCamelCase_ ) )
# verify is_crowd
UpperCamelCase_: Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCamelCase_ ) )
# verify class_labels
UpperCamelCase_: Dict = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCamelCase_ ) )
# verify masks
UpperCamelCase_: Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowerCamelCase_ )
# verify orig_size
UpperCamelCase_: List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCamelCase_ ) )
# verify size
UpperCamelCase_: List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCamelCase_ ) )
| 718 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Tuple = '''Hello, World!'''
lowerCamelCase_ : Optional[Any] = '''en_XX'''
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
UpperCamelCase_: Dict = Path("""data_bin""" )
UpperCamelCase_: Tuple = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_A ).parent ) , checkpoint_file=Path(_A ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_A ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_A ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_A )
UpperCamelCase_: str = xmod.model.encoder.sentence_encoder
UpperCamelCase_: List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCamelCase_: Dict = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _A )
UpperCamelCase_: str = XmodForSequenceClassification(_A ) if classification_head else XmodForMaskedLM(_A )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCamelCase_: Dict = xmod_sent_encoder.embed_tokens.weight
UpperCamelCase_: Tuple = xmod_sent_encoder.embed_positions.weight
UpperCamelCase_: Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCamelCase_: int = xmod_sent_encoder.layernorm_embedding.weight
UpperCamelCase_: Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCamelCase_: Tuple = model.roberta.encoder.layer[i]
UpperCamelCase_: Optional[int] = xmod_sent_encoder.layers[i]
# self attention
UpperCamelCase_: Union[str, Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
UpperCamelCase_: List[str] = xmod_layer.self_attn.q_proj.weight
UpperCamelCase_: Dict = xmod_layer.self_attn.q_proj.bias
UpperCamelCase_: int = xmod_layer.self_attn.k_proj.weight
UpperCamelCase_: str = xmod_layer.self_attn.k_proj.bias
UpperCamelCase_: str = xmod_layer.self_attn.v_proj.weight
UpperCamelCase_: int = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCamelCase_: str = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
UpperCamelCase_: Any = xmod_layer.self_attn.out_proj.weight
UpperCamelCase_: Optional[Any] = xmod_layer.self_attn.out_proj.bias
UpperCamelCase_: Optional[int] = xmod_layer.self_attn_layer_norm.weight
UpperCamelCase_: List[str] = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCamelCase_: Union[str, Any] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
UpperCamelCase_: str = xmod_layer.fca.weight
UpperCamelCase_: Tuple = xmod_layer.fca.bias
# output
UpperCamelCase_: str = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
UpperCamelCase_: Tuple = xmod_layer.fca.weight
UpperCamelCase_: Any = xmod_layer.fca.bias
UpperCamelCase_: str = xmod_layer.final_layer_norm.weight
UpperCamelCase_: Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCamelCase_: Tuple = xmod_layer.adapter_layer_norm.weight
UpperCamelCase_: List[str] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCamelCase_: Optional[int] = bert_output.adapter_modules[lang_code]
UpperCamelCase_: Optional[int] = xmod_layer.adapter_modules[lang_code]
UpperCamelCase_: Tuple = from_adapter.fca.weight
UpperCamelCase_: str = from_adapter.fca.bias
UpperCamelCase_: Tuple = from_adapter.fca.weight
UpperCamelCase_: Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCamelCase_: Optional[int] = xmod_sent_encoder.layer_norm.weight
UpperCamelCase_: Dict = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCamelCase_: int = xmod.model.classification_heads["""mnli"""].dense.weight
UpperCamelCase_: Union[str, Any] = xmod.model.classification_heads["""mnli"""].dense.bias
UpperCamelCase_: int = xmod.model.classification_heads["""mnli"""].out_proj.weight
UpperCamelCase_: List[Any] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
UpperCamelCase_: Any = xmod.model.encoder.lm_head.dense.weight
UpperCamelCase_: Optional[Any] = xmod.model.encoder.lm_head.dense.bias
UpperCamelCase_: Any = xmod.model.encoder.lm_head.layer_norm.weight
UpperCamelCase_: Optional[int] = xmod.model.encoder.lm_head.layer_norm.bias
UpperCamelCase_: List[Any] = xmod.model.encoder.lm_head.weight
UpperCamelCase_: List[str] = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCamelCase_: int = xmod.encode(_A ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_A )
UpperCamelCase_: Tuple = model(_A )[0]
if classification_head:
UpperCamelCase_: int = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_A ) )
else:
UpperCamelCase_: Dict = xmod.model(_A , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
UpperCamelCase_: Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
UpperCamelCase_: int = torch.allclose(_A , _A , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_A ).mkdir(parents=_A , exist_ok=_A )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
if __name__ == "__main__":
lowerCamelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
lowerCamelCase_ : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
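# Hedged invocation example (paths and script name are placeholders; the flags
# match the parser defined above):
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-hf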
| 719 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase ) -> List[str]:
UpperCamelCase_: List[Any] = torch.load(lowerCamelCase , map_location="""cpu""" )
UpperCamelCase_: List[Any] = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
UpperCamelCase_: Union[str, Any] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCamelCase_: Optional[int] = v
else:
UpperCamelCase_: List[str] = v
UpperCamelCase_: Optional[Any] = chkpt["""params"""]
UpperCamelCase_: Union[str, Any] = {n: v for n, v in config.items() if not isinstance(lowerCamelCase , (torch.FloatTensor, numpy.ndarray) )}
UpperCamelCase_: Union[str, Any] = chkpt["""dico_word2id"""]
UpperCamelCase_: Optional[int] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
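    # Hedged aside on the comprehension above: it rewrites the BPE vocabulary so
    # that word-final tokens gain a "</w>" suffix while "@@" continuation markers
    # are stripped; the first 14 indices (assumed special tokens) are kept verbatim.
    # e.g. {"hell@@": 14, "o": 15} -> {"hell": 14, "o</w>": 15}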
# Save pytorch-model
UpperCamelCase_: Union[str, Any] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
UpperCamelCase_: str = pytorch_dump_folder_path + """/""" + CONFIG_NAME
UpperCamelCase_: Union[str, Any] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(lowerCamelCase , lowerCamelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , indent=2 ) + """\n""" )
print(F'''Save vocab file to {pytorch_config_dump_path}''' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , indent=2 ) + """\n""" )
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Optional[Any] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 720 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
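# Hedged usage note: assuming this file is saved as stage.py, the scene above is
# rendered with the standard Manim CLI, e.g. `manim -pql stage.py _UpperCamelCase`
# (-p previews the result, -ql renders at low quality for fast iteration).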
| 670 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( lowerCamelCase ) -> str:
def decorator(lowerCamelCase ):
UpperCamelCase_: Dict = getattr(__UpperCamelCase , """handle_key""" , [] )
handle += [key]
setattr(__UpperCamelCase , """handle_key""" , __UpperCamelCase )
return func
return decorator
def A__ ( *lowerCamelCase ) -> str:
def decorator(lowerCamelCase ):
UpperCamelCase_: Optional[int] = getattr(__UpperCamelCase , """handle_key""" , [] )
handle += keys
setattr(__UpperCamelCase , """handle_key""" , __UpperCamelCase )
return func
return decorator
class _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __new__( cls : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : List[str] ):
UpperCamelCase_: Any = super().__new__(cls , _lowercase , _lowercase , _lowercase )
if not hasattr(_lowercase , """key_handler""" ):
setattr(_lowercase , """key_handler""" , {} )
setattr(_lowercase , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
UpperCamelCase_: List[str] = getattr(_lowercase , """handle_key""" , [] )
for key in handled_keys:
UpperCamelCase_: List[str] = value
return new_cls
@staticmethod
def lowerCAmelCase__ ( cls : List[Any] ):
UpperCamelCase_: Optional[int] = get_character()
if char != KEYMAP["undefined"]:
UpperCamelCase_: int = ord(_lowercase )
UpperCamelCase_: Tuple = cls.key_handler.get(_lowercase )
if handler:
UpperCamelCase_: Union[str, Any] = char
return handler(cls )
else:
return None
def A__ ( cls ) -> List[str]:
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
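# Hedged usage sketch (the names mark/register are assumptions; in this file all
# three helpers are rendered as A__, so as written they shadow one another and
# only the last definition stays reachable). Intended usage is roughly:
#
#     class Menu:
#         @mark("up")                 # first factory: bind a single key
#         def move_up(cls): ...
#
#     Menu = register(Menu)           # final factory: rebuild via KeyHandler
#     Menu.handle_input()             # reads one keypress and dispatches to move_up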
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 0 |
def prime_sieve_eratosthenes( num ) -> list[int]:
    if num <= 0:
        raise ValueError("""Input must be a positive integer""" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : List[str] = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
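# Hedged sanity check: prime_sieve_eratosthenes(10) should return [2, 3, 5, 7].
# The sieve runs in O(n log log n) time using O(n) extra space.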
| 700 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
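# Hedged migration example: the deprecated wrapper above only forwards its
# arguments to Trainer, so call sites can switch with a one-line change
# (equivalent behavior is assumed):
#
#     from transformers import Trainer
#     trainer = Trainer(args=training_args, model=model, train_dataset=train_ds)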
| 670 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Dict = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
UpperCamelCase_: List[Any] = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case_ ) , x.transpose() ) )
UpperCamelCase_: Dict = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Optional[int] = np.random.randn(3 , 4 )
UpperCamelCase_: str = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ ) , transpose(snake_case_ ).numpy() ) )
UpperCamelCase_: int = np.random.randn(3 , 4 , 5 )
UpperCamelCase_: str = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0) ) , transpose(snake_case_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = np.random.randn(3 , 4 )
UpperCamelCase_: Optional[Any] = tf.constant(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ ) , transpose(snake_case_ ).numpy() ) )
UpperCamelCase_: int = np.random.randn(3 , 4 , 5 )
UpperCamelCase_: List[str] = tf.constant(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0) ) , transpose(snake_case_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Tuple = np.random.randn(3 , 4 )
UpperCamelCase_: str = jnp.array(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ ) , np.asarray(transpose(snake_case_ ) ) ) )
UpperCamelCase_: Tuple = np.random.randn(3 , 4 , 5 )
UpperCamelCase_: Optional[int] = jnp.array(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case_ , axes=(1, 2, 0) ) ) ) )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: str = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3) ) , np.reshape(snake_case_ , (4, 3) ) ) )
UpperCamelCase_: Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case_ , (12, 5) ) , np.reshape(snake_case_ , (12, 5) ) ) )
@require_torch
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: List[Any] = np.random.randn(3 , 4 )
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3) ) , reshape(snake_case_ , (4, 3) ).numpy() ) )
UpperCamelCase_: Tuple = np.random.randn(3 , 4 , 5 )
UpperCamelCase_: List[Any] = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (12, 5) ) , reshape(snake_case_ , (12, 5) ).numpy() ) )
@require_tf
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Tuple = np.random.randn(3 , 4 )
UpperCamelCase_: Optional[int] = tf.constant(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3) ) , reshape(snake_case_ , (4, 3) ).numpy() ) )
UpperCamelCase_: str = np.random.randn(3 , 4 , 5 )
UpperCamelCase_: str = tf.constant(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (12, 5) ) , reshape(snake_case_ , (12, 5) ).numpy() ) )
@require_flax
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = np.random.randn(3 , 4 )
UpperCamelCase_: Optional[int] = jnp.array(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3) ) , np.asarray(reshape(snake_case_ , (4, 3) ) ) ) )
UpperCamelCase_: List[str] = np.random.randn(3 , 4 , 5 )
UpperCamelCase_: int = jnp.array(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (12, 5) ) , np.asarray(reshape(snake_case_ , (12, 5) ) ) ) )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Any = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case_ ) , np.squeeze(snake_case_ ) ) )
UpperCamelCase_: str = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2 ) , np.squeeze(snake_case_ , axis=2 ) ) )
@require_torch
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[Any] = np.random.randn(1 , 3 , 4 )
UpperCamelCase_: Tuple = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ ) , squeeze(snake_case_ ).numpy() ) )
UpperCamelCase_: int = np.random.randn(1 , 4 , 1 , 5 )
UpperCamelCase_: Tuple = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2 ) , squeeze(snake_case_ , axis=2 ).numpy() ) )
@require_tf
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: str = np.random.randn(1 , 3 , 4 )
UpperCamelCase_: Optional[int] = tf.constant(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ ) , squeeze(snake_case_ ).numpy() ) )
UpperCamelCase_: Optional[int] = np.random.randn(1 , 4 , 1 , 5 )
UpperCamelCase_: List[Any] = tf.constant(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2 ) , squeeze(snake_case_ , axis=2 ).numpy() ) )
@require_flax
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[str] = np.random.randn(1 , 3 , 4 )
UpperCamelCase_: List[Any] = jnp.array(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ ) , np.asarray(squeeze(snake_case_ ) ) ) )
UpperCamelCase_: Dict = np.random.randn(1 , 4 , 1 , 5 )
UpperCamelCase_: Optional[int] = jnp.array(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2 ) , np.asarray(squeeze(snake_case_ , axis=2 ) ) ) )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: List[str] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1 ) , np.expand_dims(snake_case_ , axis=1 ) ) )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Any = np.random.randn(3 , 4 )
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1 ) , expand_dims(snake_case_ , axis=1 ).numpy() ) )
@require_tf
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = np.random.randn(3 , 4 )
UpperCamelCase_: List[Any] = tf.constant(snake_case_ )
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1 ) , expand_dims(snake_case_ , axis=1 ).numpy() ) )
@require_flax
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Dict = np.random.randn(3 , 4 )
UpperCamelCase_: Dict = jnp.array(snake_case_ )
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1 ) , np.asarray(expand_dims(snake_case_ , axis=1 ) ) ) )
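# Hedged sketch of the dispatch pattern the tests above exercise: each helper
# (transpose, reshape, squeeze, expand_dims) detects the tensor's framework and
# forwards to the matching backend op, always agreeing with the NumPy result.
def _transpose_sketch(x, axes=None):  # illustrative only, not the library code
    import numpy as np

    if isinstance(x, np.ndarray):
        return np.transpose(x, axes=axes)
    if hasattr(x, "permute"):  # assume a torch.Tensor
        axes = axes if axes is not None else tuple(reversed(range(x.dim())))
        return x.permute(*axes)
    raise TypeError(f"""unsupported tensor type: {type(x)}""")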
| 701 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
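    # (weight_g / weight_v above are the magnitude / direction factors registered
    # by torch.nn.utils.weight_norm: apply_weight_norm() creates them so the
    # checkpoint tensors can be copied one-to-one, and remove_weight_norm() folds
    # both factors back into a single dense weight for inference.)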
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechTaHifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 670 | 0 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A__ ( lowerCamelCase ) -> Tuple:
UpperCamelCase_: Dict = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowerCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
UpperCamelCase_: str = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
UpperCamelCase_: str = [[0.0, 0.0], [0.0, 0.0]]
UpperCamelCase_, UpperCamelCase_: Optional[int] = matrix[1][1], matrix[0][0]
UpperCamelCase_, UpperCamelCase_: List[str] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowerCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowerCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
UpperCamelCase_: Optional[int] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
UpperCamelCase_: Union[str, Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
UpperCamelCase_: Union[str, Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
UpperCamelCase_: Dict = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
UpperCamelCase_: Tuple = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
UpperCamelCase_: str = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
UpperCamelCase_: Dict = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
UpperCamelCase_: List[Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
UpperCamelCase_: str = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
UpperCamelCase_: Optional[int] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
UpperCamelCase_: Dict = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
UpperCamelCase_: List[Any] = array(lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
UpperCamelCase_: Any = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
UpperCamelCase_: List[Any] = array(lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(lowerCamelCase )
# Calculate the inverse of the matrix
return [[float(d(lowerCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 702 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
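# (Note: unlike the lazy _import_structure pattern used by larger subpackages,
# this __init__ imports eagerly and, when torch/transformers are unavailable,
# falls back to dummy placeholder objects that raise an informative error only
# when instantiated.)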
| 703 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """k is the Harris free parameter, empirically 0.04 or 0.06."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the validated free parameter from the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                # Harris response: R = det(M) - k * trace(M)^2
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold on the response; the value can be tuned
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img[y, x] = (0, 0, 255)  # mark the detected corner
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
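# A small smoke test (illustrative): a bright square on a black background has
# four strong corners, so the detector should report a non-empty corner list.
def _harris_smoke_test() -> None:
    import os
    import tempfile

    canvas = np.zeros((64, 64), dtype=np.uint8)
    canvas[16:48, 16:48] = 255  # bright square -> corners at its four vertices
    path = os.path.join(tempfile.gettempdir(), "harris_smoke.png")
    cv2.imwrite(path, canvas)
    _, corners = HarrisCorner(0.04, 3).detect(path)
    assert len(corners) > 0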
| 670 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Tuple = None
__UpperCamelCase : Optional[int] = BloomTokenizerFast
__UpperCamelCase : Optional[int] = BloomTokenizerFast
__UpperCamelCase : List[str] = True
__UpperCamelCase : int = False
__UpperCamelCase : List[str] = """tokenizer_file"""
__UpperCamelCase : Optional[Any] = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def lowerCAmelCase__ ( self : Tuple ):
super().setUp()
UpperCamelCase_: Union[str, Any] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : str , **snake_case_ : Dict ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: int = self.get_rust_tokenizer()
UpperCamelCase_: List[str] = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
UpperCamelCase_: Optional[int] = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
UpperCamelCase_: Optional[int] = tokenizer.batch_encode_plus(snake_case_ )["input_ids"]
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[Any] = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : List[str]=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: List[Any] = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
UpperCamelCase_: Dict = "This is a simple input"
UpperCamelCase_: Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
UpperCamelCase_: List[str] = ("This is a simple input", "This is a pair")
UpperCamelCase_: Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(snake_case_ , max_length=snake_case_ )
tokenizer_r.encode_plus(snake_case_ , max_length=snake_case_ )
tokenizer_r.batch_encode_plus(snake_case_ , max_length=snake_case_ )
tokenizer_r.encode(snake_case_ , max_length=snake_case_ )
tokenizer_r.batch_encode_plus(snake_case_ , max_length=snake_case_ )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
UpperCamelCase_: str = None # Hotfixing padding = None
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Simple input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Simple input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" , )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Pair input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" , )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase_: List[str] = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=snake_case_ )
UpperCamelCase_: Optional[Any] = next(iter(snake_case_ ) )["premise"] # pick up one data
UpperCamelCase_: Any = list(sample_data.values() )
UpperCamelCase_: List[str] = list(map(tokenizer.encode , snake_case_ ) )
UpperCamelCase_: Optional[Any] = [tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ ) for x in output_tokens]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 704 |
import random
def random_graph(
    vertices_number: int, probability: float, directed: bool = False
) -> dict:
    """Generate a random graph with the given edge probability."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j]
        for i in range(vertices_number)
    }
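# A quick sanity check (illustrative): for an undirected G(n, p) graph the
# expected number of edges is p * n * (n - 1) / 2, so the generated count
# should land close to that value.
def _random_graph_demo() -> None:
    random.seed(0)
    n, p = 100, 0.3
    graph = random_graph(n, p)
    edges = sum(len(neighbors) for neighbors in graph.values()) // 2
    expected = p * n * (n - 1) / 2  # 1485 for n=100, p=0.3
    assert abs(edges - expected) < 0.1 * expected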
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 0 |
def merge_sort(collection: list) -> list:
    """Pure Python merge sort; returns a new sorted list."""

    def merge(left: list, right: list) -> list:
        """Merge two already-sorted runs, preferring the left run on ties."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
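# Property check (illustrative): the result must agree with Python's built-in
# sorted(); ties keep their relative order because merge() prefers the left
# run on equal keys (<=), making the sort stable.
def _merge_sort_demo() -> None:
    import random

    data = [random.randint(-100, 100) for _ in range(200)]
    assert merge_sort(list(data)) == sorted(data)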
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : int = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase_ : List[str] = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 705 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
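# A minimal usage sketch (illustrative) of the verbosity API exercised above;
# these helpers all live in transformers.utils.logging:
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()           # library-wide log level
#     logger = logging.get_logger(__name__)  # module-scoped logger
#     logger.info("visible at INFO and below")
#     logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS=1")
#     logging.set_verbosity_error()          # back to errors only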
| 670 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
snake_case : Union[str, Any] = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : str = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
snake_case : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
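# A minimal sketch (illustrative) of the same lazy-loading idea without
# transformers' _LazyModule helper, using module-level __getattr__ (PEP 562);
# the attribute-to-submodule mapping below is hypothetical:
#
#     import importlib
#
#     _LAZY = {"EncodecConfig": ".configuration_encodec"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")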
| 706 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : List[str] = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 707 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
| 670 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _UpperCamelCase :
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : int ):
return None
class _UpperCamelCase :
'''simple docstring'''
def lowerCAmelCase__ ( self : int , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : List[Any] ):
return None
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def lowerCAmelCase__ ( self : Optional[int] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ , """tf""" , 12 , **lowercase_ )
@require_torch
@slow
def lowerCAmelCase__ ( self : Tuple ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ , """pt""" , 12 , **lowercase_ )
@require_torch
@slow
def lowerCAmelCase__ ( self : Optional[int] ):
from transformers import BertModel
UpperCamelCase_: str = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(lowercase_ ) )
vocab_file.flush()
UpperCamelCase_: str = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase_: Any = BertModel(BertConfig(vocab_size=len(lowercase_ ) ) )
model.save_pretrained(lowercase_ )
self._test_export(lowercase_ , """pt""" , 12 , lowercase_ )
@require_tf
@slow
def lowerCAmelCase__ ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase_: Optional[int] = self._test_export(lowercase_ , """tf""" , 12 , **lowercase_ )
UpperCamelCase_: Dict = quantize(Path(lowercase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def lowerCAmelCase__ ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase_: Union[str, Any] = self._test_export(lowercase_ , """pt""" , 12 , **lowercase_ )
UpperCamelCase_: List[Any] = quantize(lowercase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def lowerCAmelCase__ ( self : Dict , snake_case_ : List[str] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[str]=None , **snake_case_ : Tuple ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase_: List[str] = Path(lowercase_ ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ )
return path
except Exception as e:
self.fail(lowercase_ )
@require_torch
@require_tokenizers
@slow
def lowerCAmelCase__ ( self : int ):
from transformers import BertModel
UpperCamelCase_: List[str] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
UpperCamelCase_: Any = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase_ , lowercase_ , """pt""" )
@require_tf
@require_tokenizers
@slow
def lowerCAmelCase__ ( self : Tuple ):
from transformers import TFBertModel
UpperCamelCase_: Any = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
UpperCamelCase_: str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase_ , lowercase_ , """tf""" )
def lowerCAmelCase__ ( self : Any , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
UpperCamelCase_: Optional[int] = FeatureExtractionPipeline(lowercase_ , lowercase_ )
UpperCamelCase_: List[Any] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
UpperCamelCase_: Any = infer_shapes(lowercase_ , lowercase_ )
# Assert all variables are present
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowercase_ )
self.assertSequenceEqual(variable_names[3:] , lowercase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = ["""input_ids""", """attention_mask""", """token_type_ids"""]
UpperCamelCase_: List[Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
UpperCamelCase_: Optional[Any] = ensure_valid_input(FuncContiguousArgs() , lowercase_ , lowercase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowercase_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowercase_ ) , set(lowercase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowercase_ , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
UpperCamelCase_: List[Any] = ensure_valid_input(FuncNonContiguousArgs() , lowercase_ , lowercase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowercase_ ) , 1 )
self.assertEqual(len(lowercase_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Tuple = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
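# The {0: "batch", 1: "sequence"} dicts asserted in _test_infer_dynamic_axis
# above are exactly the dynamic_axes mapping torch.onnx.export expects; a
# hedged sketch (`model` and `tokens` are assumed to exist):
#
#     import torch
#
#     torch.onnx.export(
#         model,
#         (tokens["input_ids"],),
#         "model.onnx",
#         input_names=["input_ids"],
#         output_names=["output_0"],
#         dynamic_axes={
#             "input_ids": {0: "batch", 1: "sequence"},
#             "output_0": {0: "batch", 1: "sequence"},
#         },
#     )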
| 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 0 |
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Intensity of polarized light after an analyzer at `angle` degrees."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of angle values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
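# Worked example (illustrative): Malus's law is I = I_0 * cos^2(theta), so an
# analyzer at 60 degrees passes exactly a quarter of the incident intensity.
def _malus_demo() -> None:
    assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9
    assert malus_law(100.0, 90.0) < 1e-9  # crossed polarizers block the beam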
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 709 |
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operators."""
    while second != 0:
        carry = first & second  # bit positions where both operands are 1
        first ^= second  # partial sum without the carries
        second = carry << 1  # propagate the carries one position left
    return first
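# Worked trace (illustrative) of add(5, 3):
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0b000, first=0b1000, second=0b000
# The loop ends once no carry remains, returning 0b1000 == 8.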
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 670 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[Any] = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _UpperCamelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase : Dict = """swin"""
__UpperCamelCase : List[str] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[Any] , snake_case_ : Union[str, Any]=224 , snake_case_ : str=4 , snake_case_ : Union[str, Any]=3 , snake_case_ : Union[str, Any]=96 , snake_case_ : Optional[int]=[2, 2, 6, 2] , snake_case_ : Any=[3, 6, 12, 24] , snake_case_ : Dict=7 , snake_case_ : Optional[Any]=4.0 , snake_case_ : Optional[int]=True , snake_case_ : Optional[int]=0.0 , snake_case_ : List[Any]=0.0 , snake_case_ : int=0.1 , snake_case_ : Any="gelu" , snake_case_ : Optional[Any]=False , snake_case_ : Optional[int]=0.02 , snake_case_ : Dict=1e-5 , snake_case_ : Optional[int]=32 , snake_case_ : int=None , snake_case_ : Any=None , **snake_case_ : Optional[int] , ):
super().__init__(**__lowerCAmelCase )
UpperCamelCase_: Tuple = image_size
UpperCamelCase_: Optional[Any] = patch_size
UpperCamelCase_: List[Any] = num_channels
UpperCamelCase_: Union[str, Any] = embed_dim
UpperCamelCase_: Union[str, Any] = depths
UpperCamelCase_: Dict = len(__lowerCAmelCase )
UpperCamelCase_: Union[str, Any] = num_heads
UpperCamelCase_: List[Any] = window_size
UpperCamelCase_: Optional[int] = mlp_ratio
UpperCamelCase_: Any = qkv_bias
UpperCamelCase_: Dict = hidden_dropout_prob
UpperCamelCase_: Any = attention_probs_dropout_prob
UpperCamelCase_: str = drop_path_rate
UpperCamelCase_: Optional[Any] = hidden_act
UpperCamelCase_: Optional[int] = use_absolute_embeddings
UpperCamelCase_: int = layer_norm_eps
UpperCamelCase_: str = initializer_range
UpperCamelCase_: Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase_: Union[str, Any] = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) )
UpperCamelCase_: str = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__lowerCAmelCase ) + 1 )]
UpperCamelCase_, UpperCamelCase_: Optional[Any] = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
class _UpperCamelCase ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase : List[Any] = version.parse("""1.11""" )
@property
def lowerCAmelCase__ ( self : Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self : Optional[int] ):
return 1e-4
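# Quick numeric check (illustrative) of the derived channel dimension above:
# with the defaults embed_dim=96 and depths=[2, 2, 6, 2], the hidden size after
# the last stage is 96 * 2 ** (4 - 1) = 768. Assuming the config class above is
# exposed as SwinConfig:
#
#     config = SwinConfig()
#     assert config.hidden_size == 768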
| 710 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
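# Shape walk-through (illustrative) of the collator above for batch_size=2 and
# num_choices=4:
#   flatten:    2 batches x 4 choices -> 8 feature dicts fed to tokenizer.pad,
#               giving input_ids of shape (8, seq_len)
#   un-flatten: v.view(2, 4, -1) restores input_ids to shape (2, 4, seq_len)
#   labels:     a (2,)-shaped integer tensor (int64 in the upstream collator)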
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
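# Example invocation (illustrative; the flags map onto the dataclass fields and
# the standard HF TrainingArguments used above):
#
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --output_dir /tmp/swag_out \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 3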
| 670 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase_ , """width_multiplier""" ) )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , snake_case_ : Optional[int] , snake_case_ : Optional[int]=13 , snake_case_ : Dict=64 , snake_case_ : Union[str, Any]=2 , snake_case_ : Any=3 , snake_case_ : Any="swish" , snake_case_ : List[Any]=3 , snake_case_ : str=32 , snake_case_ : List[str]=0.1 , snake_case_ : Tuple=0.02 , snake_case_ : Optional[Any]=True , snake_case_ : Tuple=True , snake_case_ : str=10 , snake_case_ : List[str]=None , snake_case_ : Optional[int]=0.25 , snake_case_ : int=0.0 , snake_case_ : Tuple=0.0 , ):
UpperCamelCase_: List[Any] = parent
UpperCamelCase_: str = batch_size
UpperCamelCase_: int = image_size
UpperCamelCase_: Any = patch_size
UpperCamelCase_: int = num_channels
UpperCamelCase_: List[Any] = make_divisible(512 * width_multiplier , divisor=8 )
UpperCamelCase_: Optional[int] = hidden_act
UpperCamelCase_: Any = conv_kernel_size
UpperCamelCase_: int = output_stride
UpperCamelCase_: List[Any] = classifier_dropout_prob
UpperCamelCase_: Tuple = use_labels
UpperCamelCase_: Optional[Any] = is_training
UpperCamelCase_: Optional[int] = num_labels
UpperCamelCase_: int = initializer_range
UpperCamelCase_: Tuple = scope
UpperCamelCase_: Any = width_multiplier
UpperCamelCase_: Optional[Any] = ffn_dropout
UpperCamelCase_: List[Any] = attn_dropout
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_: int = None
UpperCamelCase_: Union[str, Any] = None
if self.use_labels:
UpperCamelCase_: List[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_: Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_: Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase__ ( self : Tuple ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Union[str, Any] ):
UpperCamelCase_: Union[str, Any] = MobileViTVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCamelCase_: Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self : str , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : List[str] ):
UpperCamelCase_: List[str] = self.num_labels
UpperCamelCase_: Any = MobileViTVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCamelCase_: List[str] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Tuple ):
UpperCamelCase_: Optional[int] = self.num_labels
UpperCamelCase_: int = MobileViTVaForSemanticSegmentation(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCamelCase_: int = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCamelCase_: Optional[Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[Any] = config_and_inputs
UpperCamelCase_: int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase : Union[str, Any] = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : str = False
__UpperCamelCase : Dict = False
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Optional[int] = False
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = MobileViTVaModelTester(self )
UpperCamelCase_: Any = MobileViTVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ )
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def test_attention_outputs(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 711 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests( TestCasePlus ):
'''simple docstring'''
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir , exist_ok=True )
        contents = {"""source""": """What is love ?""", """target""": """life"""}
        n_lines = {"""train""": 12, """val""": 2, """test""": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = """\n""".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f'''{split}.{field}''' ) , """w""" ) as f:
                    f.write(content )
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , """output""" )
        data_dir = os.path.join(tmp_dir , """data""" )
        self._create_dummy_data(data_dir=data_dir )
        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , """metrics.json""" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
return result
@require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retriever(self):
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retriever(self):
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
'''simple docstring'''
    def __init__(self, parent, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = """last"""
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
UpperCamelCase_: Any = TFFlaubertForSequenceClassification(UpperCamelCase__ )
UpperCamelCase_: List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCamelCase_: Any = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''langs''': token_type_ids,
            '''lengths''': input_lengths,
        }
return config, inputs_dict
@require_tf
class TFFlaubertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config(self):
self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 712 |
class Node:
    '''simple docstring'''
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node
    def __str__(self):
        return f'''{self.data}'''
    def get_data(self):
        return self.data
    def get_next(self):
        return self.next
    def get_previous(self):
        return self.previous
class LinkedListIterator:
    '''simple docstring'''
    def __init__(self, head):
        self.current = head
    def __iter__(self):
        return self
    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    '''simple docstring'''
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list
    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )
    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None
    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head(self, node: Node):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )
    def set_tail(self, node: Node):
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )
    def insert(self, value: int):
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )
    def insert_before_node(self, node: Node, node_to_insert: Node):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert
    def insert_after_node(self, node: Node, node_to_insert: Node):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position(self, position: int, value: int):
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )
    def get_node(self, item: int):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""" )
    def delete_value(self, value):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )
    @staticmethod
    def remove_node_pointers(node: Node):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None
    def is_empty(self):
        return self.head is None
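# A minimal usage sketch (added for illustration; not part of the original module).
# It exercises insert, membership, iteration, and deletion on the list above.
def _demo_linked_list() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)  # appends at the tail after the first insert
    assert 2 in linked_list
    assert list(linked_list) == [1, 2, 3]
    linked_list.delete_value(2)
    assert str(linked_list) == "1 3"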
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : List[str] = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig( PretrainedConfig ):
    model_type = """nllb-moe"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(
        self ,
        vocab_size=12_8112 ,
        max_position_embeddings=1024 ,
        encoder_layers=12 ,
        encoder_ffn_dim=4096 ,
        encoder_attention_heads=16 ,
        decoder_layers=12 ,
        decoder_ffn_dim=4096 ,
        decoder_attention_heads=16 ,
        encoder_layerdrop=0.05 ,
        decoder_layerdrop=0.05 ,
        use_cache=True ,
        is_encoder_decoder=True ,
        activation_function="relu" ,
        d_model=1024 ,
        dropout=0.1 ,
        attention_dropout=0.1 ,
        activation_dropout=0.0 ,
        init_std=0.02 ,
        decoder_start_token_id=2 ,
        scale_embedding=True ,
        router_bias=False ,
        router_dtype="float32" ,
        router_ignore_padding_tokens=False ,
        num_experts=128 ,
        expert_capacity=64 ,
        encoder_sparse_step=4 ,
        decoder_sparse_step=4 ,
        router_z_loss_coef=0.001 ,
        router_aux_loss_coef=0.001 ,
        second_expert_policy="all" ,
        normalize_router_prob_before_dropping=False ,
        batch_prioritized_routing=False ,
        moe_eval_capacity_token_fraction=1.0 ,
        moe_token_dropout=0.2 ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        output_router_logits=False ,
        **kwargs ,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}''' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
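# A minimal usage sketch (added for illustration; not part of the original file).
# `num_experts` and `expert_capacity` are real fields of the config defined above;
# the override values here are arbitrary.
if __name__ == "__main__":
    config = NllbMoeConfig(num_experts=8, expert_capacity=32)
    print(config.model_type, config.num_experts, config.d_model)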
| 713 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mgp_str"""] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
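# Illustrative note (not part of the original file): with the _LazyModule swap above,
# heavy submodules are imported only on first attribute access, e.g.
#
#     from transformers import MgpstrConfig  # resolves through the lazy module
#
# while static type checkers still see the eager imports in the TYPE_CHECKING branch.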
| 670 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader( yaml.SafeLoader ):
    '''simple docstring'''
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("""---""" ) + 1
        yamlblock = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
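# Illustrative example (not part of the original module) of the splitter's behaviour
# on a README with a YAML front-matter block:
#
#     yaml_block, body = _split_yaml_from_readme("---\nlicense: mit\n---\n# Title")
#     assert yaml_block == "license: mit"
#     assert body == "# Title"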
class DatasetMetadata( dict ):
    '''simple docstring'''
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme(cls, path: Path):
        with open(path , encoding="""utf-8""" ) as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme(self, path: Path):
        if path.exists():
            with open(path , encoding="""utf-8""" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , """w""" , encoding="""utf-8""" ) as readme_file:
            readme_file.write(full_content )
    def _to_readme(self, readme_content: Optional[str] = None):
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content )
            full_content = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content
    @classmethod
    def from_yaml_string(cls, string: str):
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string(self):
        return yaml.safe_dump(
            {
                (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="""utf-8""" , ).decode("""utf-8""" )
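# Illustrative round-trip (not part of the original module): dash-keyed YAML fields
# are normalised to underscores on load and converted back on dump, e.g.
#
#     metadata = DatasetMetadata.from_yaml_string("train-eval-index:\n- config: default\n")
#     assert "train_eval_index" in metadata
#     print(metadata.to_yaml_string())  # emits "train-eval-index: ..."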
lowerCamelCase_ : List[str] = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
    ap.add_argument("""readme_filepath""")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 714 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet(self):
        torch.manual_seed(0 )
        model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
    def dummy_vq_model(self):
        torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vqvae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vqvae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type="""numpy""" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type="""numpy""" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        tolerance = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
        tolerance = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
    def dummy_vae(self):
        torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
@property
    def dummy_extractor(self):
        def extract(*args , **kwargs ):
            class Out:
                '''simple docstring'''
                def __init__(self):
                    self.pixel_values = torch.ones([0] )
                def to(self, device):
                    self.pixel_values.to(device )
                    return self
return Out()
return extract
    def test_safe_diffusion_ddim(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_safe_diffusion_pndm(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=None )
        assert isinstance(pipe , StableDiffusionPipeline )
        assert isinstance(pipe.scheduler , LMSDiscreteScheduler )
        assert pipe.safety_checker is None
        image = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        image = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=None )
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = (
            """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
            """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
            """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
            """ children from bahnhof zoo, detailed """
        )
        seed = 4003660346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=None )
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """padme amidala taking a bath artwork, safe for work, no nudity"""
        seed = 2734971755
        guidance_scale = 7
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = (
            """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
            """ leyendecker"""
        )
        seed = 1044355234
        guidance_scale = 12
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 715 |
def solution(length: int = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
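# Illustrative note (not part of the original solution): this appears to be the dynamic
# programme for Project Euler 116-style tiling counts. different_colour_ways_number[n][t - 2]
# counts the ways to place at least one tile of length t (t = 2, 3, 4) in a row of length n:
# for each start position s of the first tile, add the count for the remaining suffix of
# length n - s - t, plus 1 for the arrangement that uses only that tile. The answer sums
# the three tile lengths (one per colour) for the full row.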
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    '''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class MobileNetVaModelTester:
    '''simple docstring'''
    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        num_channels=3 ,
        image_size=32 ,
        depth_multiplier=0.25 ,
        min_depth=8 ,
        tf_padding=True ,
        last_hidden_size=1024 ,
        output_stride=32 ,
        hidden_act="""relu6""" ,
        classifier_dropout_prob=0.1 ,
        initializer_range=0.02 ,
        is_training=True ,
        use_labels=True ,
        num_labels=10 ,
        scope=None ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier )
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_: int = None
UpperCamelCase_: Dict = None
if self.use_labels:
UpperCamelCase_: Any = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_: Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_: Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase__ ( self : int ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self : Dict , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : int ):
UpperCamelCase_: List[str] = MobileNetVaModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase_: List[Any] = model(A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : str ):
UpperCamelCase_: List[Any] = self.num_labels
UpperCamelCase_: List[str] = MobileNetVaForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase_: Tuple = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Optional[int] = self.prepare_config_and_inputs()
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = config_and_inputs
UpperCamelCase_: Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
__UpperCamelCase : Optional[int] = (
{'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : List[str] = False
__UpperCamelCase : int = False
__UpperCamelCase : int = False
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Union[str, Any] = MobileNetVaModelTester(self )
UpperCamelCase_: List[str] = MobileNetVaConfigTester(self , config_class=A_ , has_text_modality=A_ )
def lowerCAmelCase__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def lowerCAmelCase__ ( self : Optional[Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def lowerCAmelCase__ ( self : Union[str, Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def lowerCAmelCase__ ( self : Dict ):
pass
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_: List[str] = model_class(A_ )
UpperCamelCase_: Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_: List[Any] = [*signature.parameters.keys()]
UpperCamelCase_: Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowerCAmelCase__ ( self : Any ):
def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : Optional[Any] ):
UpperCamelCase_: Dict = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase_: str = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase_: Union[str, Any] = outputs.hidden_states
UpperCamelCase_: List[str] = 26
self.assertEqual(len(A_ ) , A_ )
UpperCamelCase_: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_: Optional[int] = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_: List[Any] = True
check_hidden_states_output(A_ , A_ , A_ )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_: Tuple = MobileNetVaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A__ ( ) -> Any:
UpperCamelCase_: List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self : Any ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(A_ )
UpperCamelCase_: List[Any] = self.default_image_processor
UpperCamelCase_: Dict = prepare_img()
UpperCamelCase_: Any = image_processor(images=A_ , return_tensors="""pt""" ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase_: Any = model(**A_ )
# verify the logits
UpperCamelCase_: Optional[int] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase_: Any = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 716 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
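# Illustrative invocation (script name and all paths are hypothetical):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch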
| 670 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCamelCase_ : Tuple = TypeVar("""KT""")
lowerCamelCase_ : Union[str, Any] = TypeVar("""VT""")
class _UpperCamelCase ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self : int , snake_case_ : Any = "root" , snake_case_ : Any = None ):
UpperCamelCase_: Optional[Any] = key
UpperCamelCase_: Optional[int] = value
UpperCamelCase_: Tuple = []
def __repr__( self : Optional[Any] ):
return f'''Node({self.key}: {self.value})'''
@property
def lowerCAmelCase__ ( self : Tuple ):
return len(self.forward )
class _UpperCamelCase ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Any = 0.5 , snake_case_ : int = 16 ):
UpperCamelCase_: Optional[int] = Node[KT, VT]()
UpperCamelCase_: Union[str, Any] = 0
UpperCamelCase_: List[str] = p
UpperCamelCase_: List[str] = max_level
def __str__( self : Dict ):
UpperCamelCase_: Optional[int] = list(self )
if len(snake_case_ ) == 0:
return f'''SkipList(level={self.level})'''
UpperCamelCase_: Union[str, Any] = max((len(str(snake_case_ ) ) for item in items) , default=4 )
UpperCamelCase_: Dict = max(snake_case_ , 4 ) + 4
UpperCamelCase_: List[str] = self.head
UpperCamelCase_: int = []
UpperCamelCase_: int = node.forward.copy()
lines.append(f'''[{node.key}]'''.ljust(snake_case_ , """-""" ) + """* """ * len(snake_case_ ) )
lines.append(""" """ * label_size + """| """ * len(snake_case_ ) )
while len(node.forward ) != 0:
UpperCamelCase_: Optional[Any] = node.forward[0]
lines.append(
f'''[{node.key}]'''.ljust(snake_case_ , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(snake_case_ ) )
UpperCamelCase_: Any = node.forward
lines.append("""None""".ljust(snake_case_ ) + """* """ * len(snake_case_ ) )
return f'''SkipList(level={self.level})\n''' + "\n".join(snake_case_ )
def __iter__( self : Union[str, Any] ):
UpperCamelCase_: Any = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
UpperCamelCase_: str = node.forward[0]
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Union[str, Any] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def lowerCAmelCase__ ( self : int , snake_case_ : str ):
UpperCamelCase_: str = []
UpperCamelCase_: str = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is less than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
UpperCamelCase_: Optional[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(snake_case_ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Optional[int] ):
UpperCamelCase_, UpperCamelCase_: Tuple = self._locate_node(snake_case_ )
if node is not None:
for i, update_node in enumerate(snake_case_ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
UpperCamelCase_: str = node.forward[i]
else:
UpperCamelCase_: Union[str, Any] = update_node.forward[:i]
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
UpperCamelCase_, UpperCamelCase_: int = self._locate_node(snake_case_ )
if node is not None:
UpperCamelCase_: List[str] = value
else:
UpperCamelCase_: List[Any] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , snake_case_ ):
update_vector.append(self.head )
UpperCamelCase_: Any = level
UpperCamelCase_: List[str] = Node(snake_case_ , snake_case_ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(snake_case_ )
else:
UpperCamelCase_: Dict = new_node
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Union[str, Any] ):
UpperCamelCase_, UpperCamelCase_: List[str] = self._locate_node(snake_case_ )
if node is not None:
return node.value
return None
def A__ ( ) -> Any:
UpperCamelCase_: Dict = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 12 )
skip_list.insert("""Key3""" , 41 )
skip_list.insert("""Key4""" , -19 )
UpperCamelCase_: List[Any] = skip_list.head
UpperCamelCase_: Optional[Any] = {}
while node.level != 0:
UpperCamelCase_: Tuple = node.forward[0]
UpperCamelCase_: Union[str, Any] = node.value
assert len(lowerCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def A__ ( ) -> str:
UpperCamelCase_: List[str] = SkipList()
skip_list.insert("""Key1""" , 10 )
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 10 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 10 )
UpperCamelCase_: Dict = skip_list.head
UpperCamelCase_: Union[str, Any] = {}
while node.level != 0:
UpperCamelCase_: int = node.forward[0]
UpperCamelCase_: List[str] = node.value
if len(lowerCamelCase ) != 4:
print()
assert len(lowerCamelCase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def A__ ( ) -> Any:
UpperCamelCase_: List[Any] = SkipList()
assert skip_list.find("""Some key""" ) is None
def A__ ( ) -> Dict:
UpperCamelCase_: str = SkipList()
skip_list.insert("""Key2""" , 20 )
assert skip_list.find("""Key2""" ) == 20
skip_list.insert("""Some Key""" , 10 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 13 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 10
assert skip_list.find("""V""" ) == 13
def A__ ( ) -> Union[str, Any]:
UpperCamelCase_: Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def A__ ( ) -> List[Any]:
UpperCamelCase_: str = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def A__ ( ) -> Union[str, Any]:
UpperCamelCase_: Optional[int] = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def A__ ( ) -> Dict:
UpperCamelCase_: str = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 1_42 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""X""" )
def traverse_keys(lowerCamelCase ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(lowerCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def A__ ( ) -> List[Any]:
def is_sorted(lowerCamelCase ):
return all(next_item >= item for item, next_item in zip(lowerCamelCase , lst[1:] ) )
UpperCamelCase_: Optional[Any] = SkipList()
for i in range(10 ):
skip_list.insert(lowerCamelCase , lowerCamelCase )
assert is_sorted(list(lowerCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(lowerCamelCase ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(lowerCamelCase ) )
def A__ ( ) -> Any:
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def A__ ( ) -> Dict:
UpperCamelCase_: List[Any] = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
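# Usage sketch (names illustrative): expected search/insert/delete cost is
# O(log n), because each level retains roughly a fraction p of the nodes below it.
#   sl = SkipList()
#   sl.insert("answer", 42)
#   assert sl.find("answer") == 42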
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
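# Sketch of the intended behaviour (not executed here): importing this package is
# cheap because _LazyModule defers the heavy torch/tf/flax imports until a name
# listed in _import_structure is first accessed, e.g.
#   from transformers.models.roformer import RoFormerModel  # triggers the torch import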
| 670 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ) -> Optional[int]:
UpperCamelCase_: Dict = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__lowerCAmelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__lowerCAmelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__lowerCAmelCase )
return parser.parse_args()
def A__ ( ) -> List[Any]:
UpperCamelCase_: List[str] = parse_args()
# Import training_script as a module.
UpperCamelCase_: Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCamelCase_: Any = script_fpath.stem
UpperCamelCase_: Tuple = importlib.import_module(__lowerCAmelCase )
# Patch sys.argv
UpperCamelCase_: Union[str, Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
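# Illustrative launch (script path and flags are hypothetical):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased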
| 718 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = """https://openaipublic.azureedge.net/jukebox/models/"""
lowerCamelCase_ : Optional[Any] = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def A__ ( lowerCamelCase ) -> List[Any]:
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
UpperCamelCase_: Tuple = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
UpperCamelCase_: Optional[Any] = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
UpperCamelCase_: Union[str, Any] = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
UpperCamelCase_: int = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
UpperCamelCase_: List[Any] = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
UpperCamelCase_: Optional[Any] = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
UpperCamelCase_: int = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
UpperCamelCase_: List[str] = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
UpperCamelCase_: Optional[int] = {}
import re
UpperCamelCase_: Any = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
UpperCamelCase_: List[str] = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
UpperCamelCase_: List[Any] = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
UpperCamelCase_: Any = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
UpperCamelCase_: Union[str, Any] = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
UpperCamelCase_: Optional[Any] = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
UpperCamelCase_: Tuple = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
UpperCamelCase_: List[str] = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
UpperCamelCase_: Any = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(lowercase__ ):
UpperCamelCase_: List[str] = re_encoder_block_conv_in.match(lowercase__ )
UpperCamelCase_: Dict = regex_match.groups()
UpperCamelCase_: List[Any] = int(groups[2] ) * 2 + int(groups[3] )
UpperCamelCase_: Tuple = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
UpperCamelCase_: List[str] = re_encoder_block_conv_in.sub(lowercase__ , lowercase__ )
elif re_encoder_block_resnet.fullmatch(lowercase__ ):
UpperCamelCase_: str = re_encoder_block_resnet.match(lowercase__ )
UpperCamelCase_: Union[str, Any] = regex_match.groups()
UpperCamelCase_: int = int(groups[2] ) * 2 + int(groups[3] )
UpperCamelCase_: Tuple = {"""1""": 1, """3""": 2}[groups[-2]]
UpperCamelCase_: Union[str, Any] = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
UpperCamelCase_: Tuple = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
UpperCamelCase_: List[str] = prefix + resnet_block
UpperCamelCase_: str = re_encoder_block_resnet.sub(lowercase__ , lowercase__ )
elif re_encoder_block_proj_out.fullmatch(lowercase__ ):
UpperCamelCase_: int = re_encoder_block_proj_out.match(lowercase__ )
UpperCamelCase_: Optional[Any] = regex_match.groups()
UpperCamelCase_: Dict = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
UpperCamelCase_: str = re_encoder_block_proj_out.sub(lowercase__ , lowercase__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(lowercase__ ):
UpperCamelCase_: Optional[int] = re_decoder_block_conv_out.match(lowercase__ )
UpperCamelCase_: List[str] = regex_match.groups()
UpperCamelCase_: int = int(groups[2] ) * 2 + int(groups[3] ) - 2
UpperCamelCase_: int = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
UpperCamelCase_: str = re_decoder_block_conv_out.sub(lowercase__ , lowercase__ )
elif re_decoder_block_resnet.fullmatch(lowercase__ ):
UpperCamelCase_: Dict = re_decoder_block_resnet.match(lowercase__ )
UpperCamelCase_: str = regex_match.groups()
UpperCamelCase_: Union[str, Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
UpperCamelCase_: Union[str, Any] = {"""1""": 1, """3""": 2}[groups[-2]]
UpperCamelCase_: List[str] = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
UpperCamelCase_: Optional[int] = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
UpperCamelCase_: Any = prefix + resnet_block
UpperCamelCase_: int = re_decoder_block_resnet.sub(lowercase__ , lowercase__ )
elif re_decoder_block_proj_in.fullmatch(lowercase__ ):
UpperCamelCase_: Optional[Any] = re_decoder_block_proj_in.match(lowercase__ )
UpperCamelCase_: Any = regex_match.groups()
UpperCamelCase_: Union[str, Any] = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
UpperCamelCase_: Optional[Any] = re_decoder_block_proj_in.sub(lowercase__ , lowercase__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(lowercase__ ):
UpperCamelCase_: int = re_prior_cond_conv_out.match(lowercase__ )
UpperCamelCase_: List[Any] = regex_match.groups()
UpperCamelCase_: Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
UpperCamelCase_: Any = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
UpperCamelCase_: int = re_prior_cond_conv_out.sub(lowercase__ , lowercase__ )
elif re_prior_cond_resnet.fullmatch(lowercase__ ):
UpperCamelCase_: Tuple = re_prior_cond_resnet.match(lowercase__ )
UpperCamelCase_: Union[str, Any] = regex_match.groups()
UpperCamelCase_: int = int(groups[1] ) * 2 + int(groups[2] ) - 2
UpperCamelCase_: int = {"""1""": 1, """3""": 2}[groups[-2]]
UpperCamelCase_: List[Any] = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
UpperCamelCase_: List[str] = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
UpperCamelCase_: Optional[int] = prefix + resnet_block
UpperCamelCase_: Union[str, Any] = re_prior_cond_resnet.sub(lowercase__ , lowercase__ )
elif re_prior_cond_proj_in.fullmatch(lowercase__ ):
UpperCamelCase_: Any = re_prior_cond_proj_in.match(lowercase__ )
UpperCamelCase_: str = regex_match.groups()
UpperCamelCase_: Tuple = F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
UpperCamelCase_: Optional[Any] = re_prior_cond_proj_in.sub(lowercase__ , lowercase__ )
# keep original key
else:
UpperCamelCase_: int = original_key
UpperCamelCase_: List[Any] = replace_key(lowercase__ )
if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(F'''failed converting {original_key} to {key}, does not match''' )
# handle mismatched shape
elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
UpperCamelCase_: List[str] = model_state_dict[F'''{key_prefix}.{key}''']
print(F'''{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
UpperCamelCase_: Any = original_key
UpperCamelCase_: Optional[Any] = original_key
UpperCamelCase_: Union[str, Any] = value
return new_dict
@torch.no_grad()
def A__ ( lowerCamelCase=None , lowerCamelCase=None ) -> Any:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ):
UpperCamelCase_: List[str] = requests.get(F'''{PREFIX}{file}''' , allow_redirects=lowercase__ )
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=lowercase__ )
open(F'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' , """wb""" ).write(r.content )
UpperCamelCase_: Union[str, Any] = MODEL_MAPPING[model_name.split("""/""" )[-1]]
UpperCamelCase_: Any = JukeboxConfig.from_pretrained(lowercase__ )
UpperCamelCase_: List[Any] = JukeboxModel(lowercase__ )
UpperCamelCase_: Optional[Any] = []
UpperCamelCase_: Dict = {}
for i, dict_name in enumerate(lowercase__ ):
UpperCamelCase_: Dict = torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )["""model"""]
UpperCamelCase_: List[Any] = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
UpperCamelCase_: List[str] = old_dic[k]
elif k.endswith(""".w""" ):
UpperCamelCase_: str = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
UpperCamelCase_: Optional[Any] = old_dic[k]
else:
UpperCamelCase_: Dict = old_dic[k]
UpperCamelCase_: Dict = """vqvae""" if i == 0 else F'''priors.{3 - i}'''
UpperCamelCase_: Tuple = fix_jukebox_keys(lowercase__ , model.state_dict() , lowercase__ , lowercase__ )
weight_dict.append(lowercase__ )
UpperCamelCase_: Union[str, Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(lowercase__ )
for i in range(len(lowercase__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , """w""" ) as txtfile:
json.dump(lowercase__ , lowercase__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
return weight_dict
if __name__ == "__main__":
lowerCamelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
lowerCamelCase_ : Optional[Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
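# Illustrative invocation relying on the argparse defaults above (script name is
# hypothetical):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted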
| 719 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) -> Tuple:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
UpperCamelCase_: Dict = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
UpperCamelCase_: Any = nn.Parameter(UpperCamelCase__ )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
UpperCamelCase_: Dict = np.asarray(weights[0] )
UpperCamelCase_: List[Any] = np.asarray(weights[1] )
UpperCamelCase_: List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
# set torch weights for 1-to-1 comparison
UpperCamelCase_: Tuple = np.asarray(weights[0] )
UpperCamelCase_: Any = np.asarray(weights[1] )
UpperCamelCase_: List[Any] = np.asarray(weights[2] )
UpperCamelCase_: List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
# layernorm 1
UpperCamelCase_: str = weights[0][0][0]
UpperCamelCase_: Optional[int] = np.asarray(layer_norm_a[0] )
UpperCamelCase_: Tuple = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
UpperCamelCase_: List[Any] = weights[0][1]
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
# intermediate weights
UpperCamelCase_: int = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
UpperCamelCase_: Dict = intermediate_weights[2]
# layernorm 2
UpperCamelCase_: Optional[int] = np.asarray(intermediate_weights[0][0] )
UpperCamelCase_: Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
UpperCamelCase_: Optional[Any] = np.asarray(intermediate_weights[1][0] )
UpperCamelCase_: Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
UpperCamelCase_: Optional[int] = np.asarray(intermediate_weights[4][0] )
UpperCamelCase_: List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
# reformer model
UpperCamelCase_: List[Any] = torch_model.reformer
# word embeds
UpperCamelCase_: Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
UpperCamelCase_: Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
UpperCamelCase_: str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
UpperCamelCase_: Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
UpperCamelCase_: int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
UpperCamelCase_: Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
UpperCamelCase_: Any = np.asarray(weights[7][0] )
UpperCamelCase_: List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
UpperCamelCase_: List[Any] = np.asarray(weights[9][0] )
UpperCamelCase_: Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
# Initialise PyTorch model
UpperCamelCase_: Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: str = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
UpperCamelCase_: str = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
lowerCamelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 720 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
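# To render this scene locally (class name as defined in this file; the file name
# and quality flag are just examples):
#   manim -pql this_file.py _UpperCamelCase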
| 670 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = """open-llama"""
def __init__( self : List[Any] , snake_case_ : Any=10_0000 , snake_case_ : Union[str, Any]=4096 , snake_case_ : int=1_1008 , snake_case_ : Any=32 , snake_case_ : int=32 , snake_case_ : Union[str, Any]="silu" , snake_case_ : List[str]=2048 , snake_case_ : List[Any]=0.02 , snake_case_ : Dict=1e-6 , snake_case_ : int=True , snake_case_ : Tuple=0 , snake_case_ : Tuple=1 , snake_case_ : Tuple=2 , snake_case_ : List[str]=False , snake_case_ : Optional[Any]=True , snake_case_ : int=0.1 , snake_case_ : str=0.1 , snake_case_ : Optional[int]=True , snake_case_ : Optional[int]=True , snake_case_ : int=None , **snake_case_ : str , ):
UpperCamelCase_: int = vocab_size
UpperCamelCase_: int = max_position_embeddings
UpperCamelCase_: Optional[Any] = hidden_size
UpperCamelCase_: List[str] = intermediate_size
UpperCamelCase_: Any = num_hidden_layers
UpperCamelCase_: Optional[int] = num_attention_heads
UpperCamelCase_: Any = hidden_act
UpperCamelCase_: Tuple = initializer_range
UpperCamelCase_: List[str] = rms_norm_eps
UpperCamelCase_: List[Any] = use_cache
UpperCamelCase_: List[str] = kwargs.pop(
"""use_memorry_efficient_attention""" , UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = hidden_dropout_prob
UpperCamelCase_: Union[str, Any] = attention_dropout_prob
UpperCamelCase_: Union[str, Any] = use_stable_embedding
UpperCamelCase_: List[str] = shared_input_output_embedding
UpperCamelCase_: Any = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , tie_word_embeddings=UpperCAmelCase__ , **UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : str ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCAmelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
f'''got {self.rope_scaling}''' )
UpperCamelCase_: Tuple = self.rope_scaling.get("""type""" , UpperCAmelCase__ )
UpperCamelCase_: Dict = self.rope_scaling.get("""factor""" , UpperCAmelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
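# Illustrative use of the validation above: a well-formed override such as
#   rope_scaling={"type": "linear", "factor": 2.0}
# passes _rope_scaling_validation, while a missing/unknown type or a factor <= 1.0
# raises ValueError.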
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = """This is a test string"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def lowerCAmelCase__ ( self : List[str] ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
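# A minimal sketch of the delegation pattern these tests exercise (the class
# below is illustrative, not the real ClapProcessor): text routes to the
# tokenizer, audio to the feature extractor, and the outputs are merged.
class _ToyProcessor:
    def __init__(self, tokenizer, feature_extractor):
        self.tokenizer = tokenizer
        self.feature_extractor = feature_extractor

    def __call__(self, text=None, audios=None, **kwargs):
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios.")
        outputs = {}
        if text is not None:
            outputs.update(self.tokenizer(text, **kwargs))
        if audios is not None:
            outputs.update(self.feature_extractor(audios, **kwargs))
        return outputs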
| 670 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    '''simple docstring'''
    def __init__( self , list_of_points ):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis functions must sum to 1 for a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size = 0.01 ):
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x_points , y_points , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
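def _bezier_midpoint_check() -> None:
    # Hypothetical helper (not in the original file): a plot-free spot check.
    # For the quadratic curve with control points (0,0), (5,5), (5,0), the
    # basis at t=0.5 is [0.25, 0.5, 0.25], so the curve point is (3.75, 2.5).
    curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
    assert curve.bezier_curve_function(0.5) == (3.75, 2.5)
# _bezier_midpoint_check()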
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 700 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( Trainer ):
    '''simple docstring'''
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
        super().__init__(args=args , **kwargs )
| 670 | 0 |
def A__ ( num ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
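    # Illustrative spot checks (assumption: A__ above is the palindrome test):
    assert A__(121) is True
    assert A__(123) is False
    assert A__(-121) is False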
| 701 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def load_weights( checkpoint , hf_model , config ):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
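# Example invocation (the script name and paths below are illustrative
# placeholders, not taken from the file above):
# python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5-hifigan --config_path config.json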
| 670 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """AutoImageProcessor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def lowerCAmelCase__ ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def lowerCAmelCase__ ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def lowerCAmelCase__ ( self ):
        return ["input_ids", "attention_mask", "pixel_values"]
| 702 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))
    def get_scheduler_config( self , **snake_case_ ):
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''clip_sample''': True,
        }
        config.update(**snake_case_ )
        return config
    def full_loop( self , **snake_case_ ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**snake_case_ )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
def lowerCAmelCase__ ( self : List[Any] ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def lowerCAmelCase__ ( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
    def lowerCAmelCase__ ( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def lowerCAmelCase__ ( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def lowerCAmelCase__ ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def lowerCAmelCase__ ( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def lowerCAmelCase__ ( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def lowerCAmelCase__ ( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def lowerCAmelCase__ ( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def lowerCAmelCase__ ( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def lowerCAmelCase__ ( self ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
    def lowerCAmelCase__ ( self ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
def lowerCAmelCase__ ( self : int ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def lowerCAmelCase__ ( self : str ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def lowerCAmelCase__ ( self : Tuple ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
    def lowerCAmelCase__ ( self ):
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3
    def lowerCAmelCase__ ( self ):
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3
    def lowerCAmelCase__ ( self ):
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
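# Background note (not from the test file): with eta=0 a DDIM step is fully
# deterministic, x_{t-1} = sqrt(alpha_{t-1}) * pred_x0 + sqrt(1 - alpha_{t-1}) * eps,
# where pred_x0 = (x_t - sqrt(1 - alpha_t) * eps) / sqrt(alpha_t); that is why
# the sums and means above can be pinned to fixed reference values.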
| 703 |
import cva
import numpy as np
class HarrisCorner:
    '''simple docstring'''
    def __init__( self , k: float , window_size: int ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
    def __str__( self ):
        return str(self.k )
    def detect( self , img_path: str ):
        img = cva.imread(img_path , 0 )
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
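# Background note (not from the file): the Harris response is
# R = det(M) - k * trace(M)**2, where M is the windowed structure tensor
# built from Ixx, Iyy and Ixy; k in [0.04, 0.06] is the usual empirical range.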
| 670 | 0 |
from __future__ import annotations
import math
def prime_sieve( num ):
    if num <= 0:
        message = f'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(message )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start to False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
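    # Quick illustrative check: the primes up to 10.
    assert prime_sieve(10) == [2, 3, 5, 7]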
| 704 |
import random
def random_graph( vertices_number , probability , directed = False ) -> dict:
    graph: dict = {i: [] for i in range(vertices_number )}
    # if probability is greater than or equal to 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is less than or equal to 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i )
    return graph
def complete_graph( vertices_number ) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
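    # Illustrative check: a complete graph on 3 vertices links every pair.
    assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}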
| 670 | 0 |
from __future__ import annotations
class Node:
    '''simple docstring'''
    def __init__( self , data=None ):
        self.data = data
        self.next = None
    def __repr__( self ):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'''{temp.data}''' )
            temp = temp.next
        return "->".join(string_rep )
def make_linked_list( elements_list ):
    if not elements_list:
        raise Exception("""The Elements List is empty""" )
    current = head = Node(elements_list[0] )
    for i in range(1 , len(elements_list ) ):
        current.next = Node(elements_list[i] )
        current = current.next
    return head
def print_reverse( head_node ):
    if head_node is not None and isinstance(head_node , Node ):
        print_reverse(head_node.next )
        print(head_node.data )
def main():
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print("""Linked List:""" )
    print(linked_list )
    print("""Elements in Reverse:""" )
    print_reverse(linked_list )
if __name__ == "__main__":
main()
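# Background note (not from the file): print_reverse recurses once per node,
# so very long lists can exceed Python's default recursion limit (about 1000).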
| 705 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(level_origin )
def lowerCAmelCase__ ( self : Optional[int] ):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
        msg = """Testing 1, 2, 3"""
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + """\n""" )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , """""" )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , msg + """\n""" )
        # restore to the original level
        logging.set_verbosity(level_origin )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
        logger = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
        env_level_str = os.getenv("""TRANSFORMERS_VERBOSITY""" , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
        # restore to the original level
        os.environ["""TRANSFORMERS_VERBOSITY"""] = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
        msg = """Testing 1, 2, 3"""
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , """""" )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 670 | 0 |
def method_a( boundary , steps ):
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points( a , b , h ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f( x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(F'''y = {y}''' )
if __name__ == "__main__":
main()
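    # Reference value (background): the exact integral of x**2 over [0, 1]
    # is 1/3, roughly 0.3333, so the printed trapezoidal estimate with 10
    # steps should land close to that.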
| 706 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""image_processing_mask2former"""] = ["""Mask2FormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mask2former"""] = [
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 707 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
        distributed_args = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 670 | 0 |
import argparse
lowerCamelCase_ : str = """docs/source/_static/js/custom.js"""
def update_custom_js( version ):
    with open(lowerCamelCase_ , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion =""" ):
        index += 1
    lines[index] = F'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {""" ):
        index += 1
    # We go until the end
    while not lines[index].startswith("""}""" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F'''    "v{version}": "v{version}",\n'''
    with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
    args = parser.parse_args()
update_custom_js(args.version)
| 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def lowerCAmelCase__ ( self : Dict ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors="""pt""" )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 0 |
from __future__ import annotations
def A__ ( nums ) -> bool:
    if len(nums ) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
    if any(i <= 0 for i in nums ):
        raise ValueError("""All values must be greater than 0""" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
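    # Illustrative checks: sides (3, 4, 5) can close a polygon, (1, 1, 3) cannot.
    assert A__([3, 4, 5]) is True
    assert A__([1, 1, 3]) is False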
| 709 |
def add( first , second ) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 670 | 0 |
class TrieNode:
    '''simple docstring'''
    def __init__( self ):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many( self , words ):
        for word in words:
            self.insert(word )
    def insert( self , word ):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find( self , word ):
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete( self , word ):
        def _delete(curr , word , index ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
def print_words( node , word ):
    if node.is_leaf:
        print(word , end=""" """ )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie():
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find("""banana""" )
    assert not root.find("""bandanas""" )
    assert not root.find("""apps""" )
    assert root.find("""apple""" )
    assert root.find("""all""" )
    root.delete("""all""" )
    assert not root.find("""all""" )
    root.delete("""banana""" )
    assert not root.find("""banana""" )
    assert root.find("""bananas""" )
    return True
def print_results( msg , passes ):
    print(str(msg ) , """works!""" if passes else """doesn't work :(""" )
def pytests():
    assert test_trie()
def main():
    print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
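# Background note (not from the file): insert, find and delete each walk one
# node per character, so they run in O(len(word)) time regardless of how many
# words the trie stores.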
| 710 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
'''simple docstring'''
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
    train_file: Optional[str] = field(default=None , metadata={"""help""": """The input training data file (a text file)."""} )
    validation_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. If passed, sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            """help""": (
                """Whether to pad all samples to the maximum sentence length. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
                """efficient on GPU but very bad for TPU."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
def lowerCAmelCase__ ( self : Dict ):
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
'''simple docstring'''
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self , features ):
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["""input_ids"""] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["""labels"""] = torch.tensor(labels , dtype=torch.int64 )
        return batch
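# Shape note (background, not from the script): for a batch of B examples with
# 4 endings each, the collator pads the 4*B flattened sequences together and
# then views the padded tensors back as (B, 4, seq_len), so the model scores
# all four endings of an example jointly.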
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_swag""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["""train"""] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["""validation"""] = data_args.validation_file
        extension = data_args.train_file.split(""".""" )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            """swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [F'''ending{i}''' for i in range(4 )]
    context_name = """sent1"""
    question_header_name = """sent2"""
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
            max_seq_length = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="""max_length""" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
        train_dataset = raw_datasets["""train"""]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
        eval_dataset = raw_datasets["""validation"""]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
    # Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["""train_samples"""] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics("""train""" , metrics )
        trainer.save_metrics("""train""" , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["""eval_samples"""] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
    kwargs = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 670 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _UpperCamelCase :
'''simple docstring'''
@staticmethod
def lowerCAmelCase__ ( *snake_case_ : Optional[int] , **snake_case_ : int ):
pass
def A__ ( lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCamelCase_ : Dict = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : int = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[str] ):
UpperCamelCase_: Dict = pipeline(
"""document-question-answering""" , model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase_: int = INVOICE_URL
UpperCamelCase_: Union[str, Any] = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , """""" ) ) )
UpperCamelCase_: Dict = '''What is the placebo?'''
UpperCamelCase_: int = [
{
'''image''': load_image(UpperCamelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Any , snake_case_ : str ):
UpperCamelCase_: List[Any] = dqa_pipeline(UpperCamelCase__ , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
[
{"""score""": ANY(UpperCamelCase__ ), """answer""": ANY(UpperCamelCase__ ), """start""": ANY(UpperCamelCase__ ), """end""": ANY(UpperCamelCase__ )},
{"""score""": ANY(UpperCamelCase__ ), """answer""": ANY(UpperCamelCase__ ), """start""": ANY(UpperCamelCase__ ), """end""": ANY(UpperCamelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Tuple = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
UpperCamelCase_: str = INVOICE_URL
UpperCamelCase_: Union[str, Any] = '''How many cats are there?'''
UpperCamelCase_: str = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
UpperCamelCase_: str = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
UpperCamelCase_: List[str] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
        # No text is detected in this image, so layoutlmv2 should fail
        # and probably return an empty answer.
UpperCamelCase_: int = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCamelCase_: Dict = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
UpperCamelCase_: Optional[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCamelCase_: int = []
UpperCamelCase_: Dict = []
UpperCamelCase_: Dict = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , words=UpperCamelCase__ , boxes=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Any = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
UpperCamelCase_: Any = INVOICE_URL
UpperCamelCase_: Dict = '''What is the invoice number?'''
UpperCamelCase_: Any = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
UpperCamelCase_: int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
UpperCamelCase_: List[str] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: str = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
UpperCamelCase_: Dict = INVOICE_URL
UpperCamelCase_: int = '''What is the invoice number?'''
UpperCamelCase_: List[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
UpperCamelCase_: Union[str, Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
UpperCamelCase_: Optional[int] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Tuple = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=UpperCamelCase__ )
UpperCamelCase_: Dict = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=UpperCamelCase__ , revision="""3dc6de3""" , )
UpperCamelCase_: Tuple = INVOICE_URL
UpperCamelCase_: Tuple = '''What is the invoice number?'''
UpperCamelCase_: Union[str, Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
UpperCamelCase_: str = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
UpperCamelCase_: Optional[int] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
UpperCamelCase_: Union[str, Any] = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , """""" ) ) )
# This model should also work if `image` is set to None
UpperCamelCase_: Tuple = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: int = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=UpperCamelCase__ )
UpperCamelCase_: Optional[int] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=UpperCamelCase__ , revision="""3dc6de3""" , max_seq_len=50 , )
UpperCamelCase_: List[Any] = INVOICE_URL
UpperCamelCase_: Union[str, Any] = '''What is the invoice number?'''
UpperCamelCase_: Optional[int] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
UpperCamelCase_: List[str] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
UpperCamelCase_: int = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , """""" ) ) )
# This model should also work if `image` is set to None
UpperCamelCase_: int = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Any = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
UpperCamelCase_: Dict = INVOICE_URL
UpperCamelCase_: Optional[Any] = '''What is the invoice number?'''
UpperCamelCase_: Optional[int] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def lowerCAmelCase__ ( self : Union[str, Any] ):
pass
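# --- Added usage sketch (illustrative; not part of the original tests) ---
# The pipelines exercised above can be driven directly, e.g.:
#
#     from transformers import pipeline
#
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
#
# Each returned dict carries `score`, `answer`, and `start`/`end` word indices
# into the OCR'd word list, which are the fields the assertions above pin down.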
| 711 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
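# --- Added note (illustrative) ---
# Each test above delegates to `_run_finetune`, which writes the dummy
# train/val/test files, launches finetune_rag.py in a subprocess with the CLI
# flags built above, and reads back `metrics.json`; the assertions then check
# the exact-match score stored under result["test"][0]["test_avg_em"].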
| 670 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase : Dict = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCAmelCase__ ( self : Dict , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : List[Any] ):
UpperCamelCase_: Dict = TextaTextGenerationPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase )
return generator, ["Something to write", "Something else"]
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[Any] , snake_case_ : List[Any] ):
UpperCamelCase_: Union[str, Any] = generator("""Something there""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
UpperCamelCase_: Optional[int] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
] , )
UpperCamelCase_: List[Any] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
] , )
with self.assertRaises(_lowerCamelCase ):
generator(4 )
@require_torch
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: str = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
UpperCamelCase_: Any = generator("""Something there""" , do_sample=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": """"""}] )
UpperCamelCase_: List[Any] = 3
UpperCamelCase_: Any = generator(
"""Something there""" , num_return_sequences=_lowerCamelCase , num_beams=_lowerCamelCase , )
UpperCamelCase_: Any = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: str = generator("""This is a test""" , do_sample=_lowerCamelCase , num_return_sequences=2 , return_tensors=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
UpperCamelCase_: Optional[Any] = generator.model.config.eos_token_id
UpperCamelCase_: Optional[Any] = """<pad>"""
UpperCamelCase_: Tuple = generator(
["""This is a test""", """This is a second test"""] , do_sample=_lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowerCamelCase , )
self.assertEqual(
_lowerCamelCase , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: str = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
UpperCamelCase_: str = generator("""Something there""" , do_sample=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": """"""}] )
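# --- Added usage sketch (illustrative; not part of the original tests) ---
# The behaviour pinned down above, outside the test harness:
#
#     generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#     generator("Something there", do_sample=False)
#     # -> [{"generated_text": ""}]  (tiny random model; deterministic with do_sample=False)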
| 712 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass
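# --- Added usage sketch (illustrative; relies on the method names
# reconstructed above, which are an assumption) ---
def _demo() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)          # list: 1 2 3
    linked_list.insert_at_position(2, 9)   # list: 1 9 2 3
    assert str(linked_list) == "1 9 2 3"
    linked_list.delete_value(9)
    assert list(linked_list) == [1, 2, 3]  # iteration uses LinkedListIterator
    assert 2 in linked_list and not linked_list.is_empty()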
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( _A , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
@property
def lowerCAmelCase__ ( self : str ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self : Tuple ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
UpperCamelCase_: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
UpperCamelCase_: List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Tuple = "A red cat sitting on a park bench"
UpperCamelCase_: Optional[Any] = np.random.RandomState(0 )
UpperCamelCase_: List[str] = pipe(
prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type="""np""" , )
UpperCamelCase_: int = output.images
UpperCamelCase_: Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase_: Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
UpperCamelCase_: List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
UpperCamelCase_: Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
UpperCamelCase_: Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Tuple = "A red cat sitting on a park bench"
UpperCamelCase_: Dict = np.random.RandomState(0 )
UpperCamelCase_: Optional[Any] = pipe(
prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type="""np""" , )
UpperCamelCase_: Tuple = output.images
UpperCamelCase_: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase_: List[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
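# --- Added note (illustrative) ---
# `provider` and `sess_options` are plain onnxruntime concepts: the provider
# tuple ("CUDAExecutionProvider", {...provider options...}) selects and
# configures the execution backend, while SessionOptions tunes the
# InferenceSession itself, as set up in the `gpu_provider` / `gpu_options`
# properties above.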
| 713 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
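# --- Added note (illustrative) ---
# The `_LazyModule` pattern above defers heavy imports until first attribute
# access: importing `MgpstrConfig` stays cheap, while touching `MgpstrModel`
# is what actually pulls in the torch-backed `modeling_mgp_str` submodule.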
| 670 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase_ : Tuple = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class _UpperCamelCase ( __lowercase ):
'''simple docstring'''
__UpperCamelCase : List[str] = 'data2vec-text'
def __init__( self : Tuple , snake_case_ : int=3_0522 , snake_case_ : Optional[Any]=768 , snake_case_ : str=12 , snake_case_ : Any=12 , snake_case_ : Any=3072 , snake_case_ : str="gelu" , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : Any=512 , snake_case_ : List[str]=2 , snake_case_ : Tuple=0.02 , snake_case_ : Tuple=1e-12 , snake_case_ : Optional[int]=1 , snake_case_ : List[Any]=0 , snake_case_ : List[Any]=2 , snake_case_ : Dict="absolute" , snake_case_ : str=True , snake_case_ : Any=None , **snake_case_ : int , ):
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
UpperCamelCase_: Optional[int] = vocab_size
UpperCamelCase_: str = hidden_size
UpperCamelCase_: List[Any] = num_hidden_layers
UpperCamelCase_: List[str] = num_attention_heads
UpperCamelCase_: Optional[Any] = hidden_act
UpperCamelCase_: Dict = intermediate_size
UpperCamelCase_: List[str] = hidden_dropout_prob
UpperCamelCase_: str = attention_probs_dropout_prob
UpperCamelCase_: List[str] = max_position_embeddings
UpperCamelCase_: int = type_vocab_size
UpperCamelCase_: Dict = initializer_range
UpperCamelCase_: str = layer_norm_eps
UpperCamelCase_: str = position_embedding_type
UpperCamelCase_: Union[str, Any] = use_cache
UpperCamelCase_: Tuple = classifier_dropout
class _UpperCamelCase ( __lowercase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : List[Any] ):
if self.task == "multiple-choice":
UpperCamelCase_: Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCamelCase_: int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
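# --- Added usage sketch (illustrative; the upstream class names
# Data2VecTextConfig / Data2VecTextOnnxConfig are an assumption, since this
# copy obfuscates them) ---
#
#     config = Data2VecTextConfig(vocab_size=30522, hidden_size=768)
#     onnx_config = Data2VecTextOnnxConfig(config, task="multiple-choice")
#     list(onnx_config.inputs)  # -> ["input_ids", "attention_mask"],
#                               #    each with batch/choice/sequence dynamic axes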
| 714 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
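# --- Added usage sketch (illustrative; not part of the original tests) ---
# The slow test above, reduced to its essence:
#
#     ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = ldm(generator=torch.manual_seed(0), num_inference_steps=5,
#                 output_type="numpy").images[0]   # (256, 256, 3) ndarray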
| 670 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCamelCase ( UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = KandinskyInpaintPipeline
__UpperCamelCase : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
__UpperCamelCase : Any = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
__UpperCamelCase : Dict = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__UpperCamelCase : List[str] = False
@property
def lowerCAmelCase__ ( self : Dict ):
return 32
@property
def lowerCAmelCase__ ( self : Optional[int] ):
return 32
@property
def lowerCAmelCase__ ( self : List[str] ):
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : Optional[int] ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Optional[int] ):
return 100
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def lowerCAmelCase__ ( self : List[str] ):
torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
UpperCamelCase_: Any = MultilingualCLIP(__A )
UpperCamelCase_: Tuple = text_encoder.eval()
return text_encoder
@property
def lowerCAmelCase__ ( self : List[str] ):
torch.manual_seed(0 )
UpperCamelCase_: str = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCamelCase_: Optional[int] = UNetaDConditionModel(**__A )
return model
@property
def lowerCAmelCase__ ( self : Any ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self : str ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: str = self.dummy_text_encoder
UpperCamelCase_: List[Any] = self.dummy_tokenizer
UpperCamelCase_: int = self.dummy_unet
UpperCamelCase_: List[str] = self.dummy_movq
UpperCamelCase_: List[str] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__A , set_alpha_to_one=__A , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__A , )
UpperCamelCase_: Optional[int] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase__ ( self : Dict , snake_case_ : Optional[int] , snake_case_ : Union[str, Any]=0 ):
UpperCamelCase_: Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__A ) ).to(__A )
UpperCamelCase_: Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__A )
# create init_image
UpperCamelCase_: Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
UpperCamelCase_: Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase_: Union[str, Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
UpperCamelCase_: List[str] = np.ones((64, 64) , dtype=np.floataa )
UpperCamelCase_: str = 0
if str(__A ).startswith("""mps""" ):
UpperCamelCase_: str = torch.manual_seed(__A )
else:
UpperCamelCase_: int = torch.Generator(device=__A ).manual_seed(__A )
UpperCamelCase_: List[str] = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = """cpu"""
UpperCamelCase_: List[str] = self.get_dummy_components()
UpperCamelCase_: Any = self.pipeline_class(**__A )
UpperCamelCase_: Optional[int] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
UpperCamelCase_: List[Any] = pipe(**self.get_dummy_inputs(__A ) )
UpperCamelCase_: List[Any] = output.images
UpperCamelCase_: Tuple = pipe(
**self.get_dummy_inputs(__A ) , return_dict=__A , )[0]
UpperCamelCase_: Tuple = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: Tuple = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def lowerCAmelCase__ ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
UpperCamelCase_: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
UpperCamelCase_: Tuple = np.ones((768, 768) , dtype=np.floataa )
UpperCamelCase_: List[Any] = 0
UpperCamelCase_: Optional[Any] = """a hat"""
UpperCamelCase_: str = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__A )
UpperCamelCase_: str = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
UpperCamelCase_: Optional[int] = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
UpperCamelCase_: Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCamelCase_, UpperCamelCase_: Tuple = pipe_prior(
__A , generator=__A , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCamelCase_: Tuple = pipeline(
__A , image=__A , mask_image=__A , image_embeds=__A , negative_image_embeds=__A , generator=__A , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
UpperCamelCase_: Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__A , __A )
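# --- Added note (illustrative) ---
# The slow test above chains two models: KandinskyPriorPipeline maps the text
# prompt to image embeddings, and KandinskyInpaintPipeline then repaints the
# masked region of the source image conditioned on those embeddings.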
| 715 |
def A__ ( length = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
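# --- Added cross-check (an illustrative assumption, not in the original) ---
# The DP above is Project Euler 116: for each single coloured-tile length m in
# {2, 3, 4}, count rows of `length` built from black unit tiles plus m-tiles
# of that one colour, using at least one coloured tile. Per colour this obeys
#     g(n) = g(n - 1) + g(n - m),  g(k) = 1 for 0 <= k < m,
# where g also counts the all-black row, so each colour contributes
# g(length) - 1; summing over m should match solution(length).
def _closed_form(length: int = 50) -> int:
    total = 0
    for m in (2, 3, 4):
        g = [1] * (length + 1)
        for n in range(m, length + 1):
            g[n] = g[n - 1] + g[n - m]
        total += g[length] - 1  # drop the all-black arrangement
    return total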
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 0 |
import random


def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
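# --- Added usage note (illustrative) ---
# quick_select(items, k) returns the element that would sit at index k in
# sorted order, so the median of an odd-length list is
# quick_select(items, len(items) // 2), e.g.
# quick_select([5, 1, 4, 2, 3], 2) -> 3.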
| 716 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    # Save the PyTorch model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
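# --- Added usage note (illustrative; the script name and paths below are placeholders) ---
# python convert_t5_checkpoint.py \
#     --tf_checkpoint_path /path/to/t5/model.ckpt \
#     --config_file /path/to/t5/config.json \
#     --pytorch_dump_path /path/to/pytorch_model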
| 670 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = RobertaTokenizer
__UpperCamelCase : Tuple = RobertaTokenizerFast
__UpperCamelCase : Tuple = True
__UpperCamelCase : Dict = {"""cls_token""": """<s>"""}
def lowerCAmelCase__ ( self : Optional[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase_: Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCamelCase_: Tuple = dict(zip(__a , range(len(__a ) ) ) )
UpperCamelCase_: List[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCamelCase_: Dict = {'unk_token': '<unk>'}
UpperCamelCase_: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase__ ( self : List[str] , **snake_case_ : List[Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase__ ( self : Union[str, Any] , **snake_case_ : List[Any] ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Optional[Any] ):
UpperCamelCase_: Any = 'lower newer'
UpperCamelCase_: Optional[int] = 'lower newer'
return input_text, output_text
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase_: Optional[Any] = 'lower newer'
UpperCamelCase_: Optional[int] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
UpperCamelCase_: Optional[int] = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
UpperCamelCase_: List[Any] = tokens + [tokenizer.unk_token]
UpperCamelCase_: Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Tuple = self.tokenizer_class.from_pretrained("""roberta-base""" )
UpperCamelCase_: int = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
UpperCamelCase_: Any = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
UpperCamelCase_: Union[str, Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
UpperCamelCase_: List[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
UpperCamelCase_: str = tokenizer.build_inputs_with_special_tokens(__a )
UpperCamelCase_: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Any = 'Encode this sequence.'
UpperCamelCase_: Dict = tokenizer.byte_encoder[' '.encode("""utf-8""" )[0]]
# Testing encoder arguments
UpperCamelCase_: List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
UpperCamelCase_: Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
UpperCamelCase_: Tuple = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
UpperCamelCase_: Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
UpperCamelCase_: Union[str, Any] = tokenizer.encode(__a , add_special_tokens=__a )
UpperCamelCase_: List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
UpperCamelCase_: Dict = '<mask>'
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
UpperCamelCase_: List[str] = tokenizer.convert_tokens_to_ids(__a )
UpperCamelCase_: int = 'Encode <mask> sequence'
UpperCamelCase_: List[Any] = 'Encode <mask>sequence'
UpperCamelCase_: str = tokenizer.encode(__a )
UpperCamelCase_: str = encoded.index(__a )
UpperCamelCase_: Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
UpperCamelCase_: Dict = tokenizer.encode(__a )
UpperCamelCase_: int = encoded.index(__a )
UpperCamelCase_: Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase__ ( self : Dict ):
pass
def lowerCAmelCase__ ( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCamelCase_: Dict = self.tokenizer_class.from_pretrained(__a , **__a )
UpperCamelCase_: Any = 'A, <mask> AllenNLP sentence.'
UpperCamelCase_: Optional[int] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
UpperCamelCase_: int = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
UpperCamelCase_: List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCamelCase_: int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase__ ( self : Tuple ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCamelCase_: Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCamelCase_: Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCamelCase_: Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase__ ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: List[Any] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase_: str = f'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase_: Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCamelCase_: Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
UpperCamelCase_: str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCamelCase_: Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
UpperCamelCase_: Dict = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCamelCase_: Optional[int] = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
UpperCamelCase_: Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCamelCase_: List[str] = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
UpperCamelCase_: Any = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCamelCase_: List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCamelCase_: List[str] = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
UpperCamelCase_: List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCamelCase_: Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
UpperCamelCase_: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCamelCase_: Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
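# --- Illustrative sketch (not part of the original file) ---
# A minimal stand-in for the _LazyModule pattern used above, written with only
# the standard library: attributes are resolved on first access via importlib,
# so heavy backends (torch/tf/flax) are imported only when actually needed.
# The name `make_lazy_module` and the example structure are hypothetical.
import importlib
import types


def make_lazy_module(name, import_structure):
    """Return a module-like object that imports submodules on attribute access."""
    attr_to_submodule = {
        attr: submodule
        for submodule, attrs in import_structure.items()
        for attr in attrs
    }

    class _Lazy(types.ModuleType):
        def __getattr__(self, attr):
            submodule = attr_to_submodule.get(attr)
            if submodule is None:
                raise AttributeError(f"module {name!r} has no attribute {attr!r}")
            value = getattr(importlib.import_module(f"{name}.{submodule}"), attr)
            setattr(self, attr, value)  # cache so __getattr__ only runs once
            return value

    return _Lazy(name)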
| 670 | 0 |
from __future__ import annotations
from typing import Any
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , snake_case_ : int , snake_case_ : int , snake_case_ : float = 0 ):
UpperCamelCase_, UpperCamelCase_: Dict = row, column
UpperCamelCase_: Union[str, Any] = [[default_value for c in range(__A )] for r in range(__A )]
def __str__( self : int ):
UpperCamelCase_: Any = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
UpperCamelCase_: Union[str, Any] = 0
for row_vector in self.array:
for obj in row_vector:
UpperCamelCase_: Optional[Any] = max(__A , len(str(__A ) ) )
UpperCamelCase_: Union[str, Any] = f'''%{max_element_length}s'''
# Make string and return
def single_line(snake_case_ : list[float] ) -> str:
nonlocal string_format_identifier
UpperCamelCase_: List[str] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__A ) for row_vector in self.array )
return s
def __repr__( self : List[str] ):
return str(self )
def lowerCAmelCase__ ( self : str , snake_case_ : tuple[int, int] ):
if not (isinstance(__A , (list, tuple) ) and len(__A ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : int , snake_case_ : tuple[int, int] ):
assert self.validate_indicies(__A )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Any , snake_case_ : tuple[int, int] , snake_case_ : float ):
assert self.validate_indicies(__A )
UpperCamelCase_: Any = value
def __add__( self : int , snake_case_ : Matrix ):
assert isinstance(__A , __A )
assert self.row == another.row and self.column == another.column
# Add
UpperCamelCase_: Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCamelCase_: str = self[r, c] + another[r, c]
return result
def __neg__( self : Dict ):
UpperCamelCase_: str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCamelCase_: Optional[int] = -self[r, c]
return result
def __sub__( self : List[Any] , snake_case_ : Matrix ):
return self + (-another)
def __mul__( self : List[str] , snake_case_ : int | float | Matrix ):
if isinstance(__A , (int, float) ): # Scalar multiplication
UpperCamelCase_: List[Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCamelCase_: Optional[Any] = self[r, c] * another
return result
elif isinstance(__A , __A ): # Matrix multiplication
assert self.column == another.row
UpperCamelCase_: Any = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
UpperCamelCase_: Dict = f'''Unsupported type given for another ({type(__A )})'''
raise TypeError(__A )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Optional[int] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
UpperCamelCase_: List[Any] = self[r, c]
return result
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Matrix , snake_case_ : Matrix ):
assert isinstance(__A , __A ) and isinstance(__A , __A )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
UpperCamelCase_: Union[str, Any] = v.transpose()
UpperCamelCase_: Optional[int] = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def A__ ( ) -> int:
UpperCamelCase_: Optional[int] = Matrix(3 , 3 , 0 )
for i in range(3 ):
UpperCamelCase_: Union[str, Any] = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
UpperCamelCase_: Optional[Any] = Matrix(3 , 1 , 0 )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = 1, 2, -3
UpperCamelCase_: Dict = Matrix(3 , 1 , 0 )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Union[str, Any] = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowercase , _lowercase )}''' )
def A__ ( ) -> Union[str, Any]:
import doctest
doctest.testmod()
testa()
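# --- Editorial note (not part of the original file) ---
# The sherman_morrison method above implements the identity
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# with `self` playing the role of A^(-1); it returns None exactly when the
# denominator 1 + v^T A^(-1) u is zero, i.e. when the update is singular.
# A quick cross-check with numpy (assumed available, illustrative only):
#
#   import numpy as np
#   a_inv = np.eye(3)
#   u = np.array([[1.0], [2.0], [-3.0]])
#   v = np.array([[4.0], [-2.0], [5.0]])
#   lhs = np.linalg.inv(np.linalg.inv(a_inv) + u @ v.T)
#   rhs = a_inv - (a_inv @ u @ v.T @ a_inv) / (1.0 + v.T @ a_inv @ u)
#   assert np.allclose(lhs, rhs)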
| 718 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 0 |
from __future__ import annotations
import requests
lowerCamelCase_ : int = set(
"""approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports""".split()
)
def A__ ( lowerCamelCase , lowerCamelCase = 1 , lowerCamelCase = "new" , lowerCamelCase = None ) -> str:
UpperCamelCase_: Union[str, Any] = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(_lowerCamelCase ) - valid_terms ) ):
UpperCamelCase_: int = F'''Invalid search term: {invalid_search_terms}'''
raise ValueError(_lowerCamelCase )
UpperCamelCase_: Optional[int] = requests.get(
F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={"""User-agent""": """A random string"""} , )
if response.status_code == 4_29:
raise requests.HTTPError
UpperCamelCase_: Union[str, Any] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(_lowerCamelCase )}
UpperCamelCase_: Union[str, Any] = {}
for id_ in range(_lowerCamelCase ):
UpperCamelCase_: Optional[Any] = {
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
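# --- Illustrative sketch (not part of the original file) ---
# A small retry wrapper for the HTTP 429 case raised above. The exponential
# backoff schedule is an assumption, not Reddit's documented policy, and
# `fetch_with_backoff` is a hypothetical name.
import time


def fetch_with_backoff(url, headers, max_retries=3):
    for attempt in range(max_retries):
        response = requests.get(url, headers=headers)
        if response.status_code != 4_29:
            return response
        time.sleep(2**attempt)  # wait 1s, 2s, 4s between attempts
    raise requests.HTTPError("Rate limited after retries")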
| 719 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
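# --- Illustrative sketch (not part of the original file) ---
# The try/except OptionalDependencyNotAvailable blocks above amount to an
# availability probe per backend. A minimal standard-library version
# (`backend_available` is a hypothetical name):
import importlib.util


def backend_available(package_name):
    """True if `package_name` is importable, without actually importing it."""
    return importlib.util.find_spec(package_name) is not None


# e.g. only register torch-backed symbols when the backend is installed:
#   if backend_available("torch"):
#       _import_structure["modeling_distilbert"] = ["DistilBertModel"]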
| 670 | 0 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCamelCase_ : Dict = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def A__ ( lowerCamelCase ) -> Optional[int]:
UpperCamelCase_: List[Any] = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
lowerCamelCase_ : Any = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def A__ ( lowerCamelCase ) -> Any:
UpperCamelCase_: Dict = list(s_dict.keys() )
for key in keys:
UpperCamelCase_: List[str] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
UpperCamelCase_: List[Any] = new_key.replace(lowercase_ , lowercase_ )
print(F'''{key} -> {new_key}''' )
UpperCamelCase_: Tuple = s_dict.pop(lowercase_ )
return s_dict
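# Editorial worked example (not part of the original script): given the
# WHISPER_MAPPING above, rename_keys turns a checkpoint key such as
#   "decoder.blocks.0.mlp.0.weight"  ->  "decoder.layers.0.fc1.weight"
# because both "blocks" -> "layers" and "mlp.0" -> "fc1" match as substrings.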
def A__ ( lowerCamelCase ) -> Optional[int]:
UpperCamelCase_: Any = emb.weight.shape
UpperCamelCase_: str = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
UpperCamelCase_: Union[str, Any] = emb.weight.data
return lin_layer
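# Editorial note (not part of the original script): the helper above ties the
# LM head to the token embedding by copying emb.weight (shape vocab x d_model)
# into a bias-free nn.Linear, so logits are computed as hidden_states @ emb.weight.T.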
def A__ ( lowerCamelCase , lowerCamelCase ) -> bytes:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
UpperCamelCase_: Tuple = os.path.basename(lowercase_ )
UpperCamelCase_: int = url.split("""/""" )[-2]
UpperCamelCase_: Dict = os.path.join(lowercase_ , lowercase_ )
if os.path.exists(lowercase_ ) and not os.path.isfile(lowercase_ ):
raise RuntimeError(F'''{download_target} exists and is not a regular file''' )
if os.path.isfile(lowercase_ ):
UpperCamelCase_: Optional[Any] = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(lowercase_ ) as source, open(lowercase_ , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=lowercase_ , unit_divisor=10_24 ) as loop:
while True:
UpperCamelCase_: Any = source.read(81_92 )
if not buffer:
break
output.write(lowercase_ )
loop.update(len(lowercase_ ) )
UpperCamelCase_: Dict = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def A__ ( lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
if ".pt" not in checkpoint_path:
UpperCamelCase_: Tuple = _download(_MODELS[checkpoint_path] )
else:
UpperCamelCase_: Optional[int] = torch.load(lowercase_ , map_location="""cpu""" )
UpperCamelCase_: str = original_checkpoint["""dims"""]
UpperCamelCase_: List[Any] = original_checkpoint["""model_state_dict"""]
UpperCamelCase_: Optional[Any] = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(lowercase_ )
rename_keys(lowercase_ )
UpperCamelCase_: List[str] = True
UpperCamelCase_: Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
UpperCamelCase_: List[Any] = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
UpperCamelCase_: Optional[Any] = WhisperForConditionalGeneration(lowercase_ )
UpperCamelCase_: List[Any] = model.model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0 and not set(lowercase_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
UpperCamelCase_: Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCamelCase_: str = proj_out_weights
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase_ : Any = argparse.ArgumentParser()
    # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase_ : Tuple = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 720 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase : Dict = ['image_processor', 'tokenizer']
__UpperCamelCase : Optional[int] = 'ChineseCLIPImageProcessor'
__UpperCamelCase : Union[str, Any] = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Tuple , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=None , **snake_case_ : Dict ):
UpperCamelCase_: Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _a , )
UpperCamelCase_: int = kwargs.pop("""feature_extractor""" )
UpperCamelCase_: Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a , _a )
UpperCamelCase_: List[Any] = self.image_processor
def __call__( self : int , snake_case_ : Optional[Any]=None , snake_case_ : Union[str, Any]=None , snake_case_ : Optional[Any]=None , **snake_case_ : Optional[int] ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCamelCase_: List[str] = self.tokenizer(_a , return_tensors=_a , **_a )
if images is not None:
UpperCamelCase_: List[Any] = self.image_processor(_a , return_tensors=_a , **_a )
if text is not None and images is not None:
UpperCamelCase_: List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def lowerCAmelCase__ ( self : Optional[int] , *snake_case_ : List[str] , **snake_case_ : List[Any] ):
return self.tokenizer.batch_decode(*_a , **_a )
def lowerCAmelCase__ ( self : Any , *snake_case_ : Tuple , **snake_case_ : str ):
return self.tokenizer.decode(*_a , **_a )
@property
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Tuple = self.tokenizer.model_input_names
UpperCamelCase_: Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase__ ( self : List[Any] ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _a , )
return self.image_processor_class
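# --- Illustrative usage sketch (not part of the original file) ---
# How a processor like the one above is typically driven; the checkpoint id
# and inputs below are assumptions for illustration only:
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
#   # `inputs` then carries both input_ids and pixel_values, as in __call__ above.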
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCamelCase_ : int = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase_ : Optional[int] = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
lowerCamelCase_ : Dict = {
"""facebook/bart-base""": 10_24,
"""facebook/bart-large""": 10_24,
"""facebook/bart-large-mnli""": 10_24,
"""facebook/bart-large-cnn""": 10_24,
"""facebook/bart-large-xsum""": 10_24,
"""yjernite/bart_eli5""": 10_24,
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Dict = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"]
__UpperCamelCase : Any = BartTokenizer
def __init__( self : Any , snake_case_ : str=None , snake_case_ : str=None , snake_case_ : Tuple=None , snake_case_ : List[Any]="replace" , snake_case_ : List[Any]="<s>" , snake_case_ : List[Any]="</s>" , snake_case_ : Union[str, Any]="</s>" , snake_case_ : List[Any]="<s>" , snake_case_ : List[Any]="<unk>" , snake_case_ : int="<pad>" , snake_case_ : int="<mask>" , snake_case_ : str=False , snake_case_ : Optional[int]=True , **snake_case_ : Optional[int] , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCamelCase_: Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
UpperCamelCase_: Optional[int] = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
UpperCamelCase_: List[Any] = add_prefix_space
UpperCamelCase_: int = pre_tok_class(**UpperCamelCase__ )
UpperCamelCase_: int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCamelCase_: Dict = """post_processor"""
UpperCamelCase_: Any = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
if tokenizer_component_instance:
UpperCamelCase_: Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase_: str = tuple(state["""sep"""] )
if "cls" in state:
UpperCamelCase_: List[Any] = tuple(state["""cls"""] )
UpperCamelCase_: List[Any] = False
if state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
UpperCamelCase_: Dict = add_prefix_space
UpperCamelCase_: Tuple = True
if state.get("""trim_offsets""" , UpperCamelCase__ ) != trim_offsets:
UpperCamelCase_: Optional[int] = trim_offsets
UpperCamelCase_: str = True
if changes_to_apply:
UpperCamelCase_: str = getattr(UpperCamelCase__ , state.pop("""type""" ) )
UpperCamelCase_: List[str] = component_class(**UpperCamelCase__ )
setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase__ ( self : Any , snake_case_ : Optional[Any] ):
UpperCamelCase_: List[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value
UpperCamelCase_: Optional[int] = value
def lowerCAmelCase__ ( self : Tuple , *snake_case_ : Tuple , **snake_case_ : Dict ):
UpperCamelCase_: List[str] = kwargs.get("""is_split_into_words""" , UpperCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , *snake_case_ : List[Any] , **snake_case_ : List[Any] ):
UpperCamelCase_: List[str] = kwargs.get("""is_split_into_words""" , UpperCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Tuple , snake_case_ : Union[str, Any] = None ):
UpperCamelCase_: Optional[int] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def lowerCAmelCase__ ( self : Any , snake_case_ : Any , snake_case_ : List[str]=None ):
UpperCamelCase_: Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self : Tuple , snake_case_ : str , snake_case_ : Optional[int] = None ):
UpperCamelCase_: List[Any] = [self.sep_token_id]
UpperCamelCase_: str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
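    # Editorial worked example (not part of the original file): the two methods
    # above produce the layouts
    #   single sequence: <s> A </s>
    #   sequence pair:   <s> A </s> </s> B </s>
    # with token_type_ids all zeros in both cases, since BART does not use
    # token type ids.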
| 700 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
| 670 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Union[str, Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
lowerCamelCase_ : List[str] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def A__ ( lowerCamelCase ) -> Any:
UpperCamelCase_: str = {}
with open(lowerCamelCase , """r""" ) as file:
for line_number, line in enumerate(lowerCamelCase ):
UpperCamelCase_: Tuple = line.strip()
if line:
UpperCamelCase_: int = line.split()
UpperCamelCase_: List[str] = line_number
UpperCamelCase_: Tuple = words[0]
UpperCamelCase_: Union[str, Any] = value
return result
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
for attribute in key.split(""".""" ):
UpperCamelCase_: Optional[Any] = getattr(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Dict = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase ):
UpperCamelCase_: Dict = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCamelCase_: Dict = """param"""
if weight_type is not None and weight_type != "param":
UpperCamelCase_: Any = getattr(lowerCamelCase , lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
UpperCamelCase_: Optional[int] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
UpperCamelCase_: Dict = getattr(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Tuple = shape_pointer.shape
# let's reduce dimension
UpperCamelCase_: Union[str, Any] = value[0]
else:
UpperCamelCase_: List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase_: Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase_: int = value
elif weight_type == "weight_v":
UpperCamelCase_: Union[str, Any] = value
elif weight_type == "bias":
UpperCamelCase_: Optional[int] = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
UpperCamelCase_: str = getattr(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[int] = value
else:
UpperCamelCase_: Optional[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
UpperCamelCase_: List[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCamelCase_: List[str] = """param"""
if weight_type is not None and weight_type != "param":
UpperCamelCase_: Union[str, Any] = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCamelCase_: List[str] = """.""".join([key, hf_param_name] )
else:
UpperCamelCase_: Optional[Any] = key
UpperCamelCase_: Tuple = value if """lm_head""" in full_key else value[0]
lowerCamelCase_ : Dict = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ) -> List[str]:
UpperCamelCase_: int = False
for key, mapped_key in MAPPING.items():
UpperCamelCase_: Union[str, Any] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCamelCase_: Any = True
if "*" in mapped_key:
UpperCamelCase_: Optional[int] = name.split(lowerCamelCase )[0].split(""".""" )[-2]
UpperCamelCase_: Union[str, Any] = mapped_key.replace("""*""" , lowerCamelCase )
if "weight_g" in name:
UpperCamelCase_: Dict = """weight_g"""
elif "weight_v" in name:
UpperCamelCase_: Optional[int] = """weight_v"""
elif "bias" in name:
UpperCamelCase_: Union[str, Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase_: Dict = """weight"""
else:
UpperCamelCase_: Optional[int] = None
if hf_dict is not None:
rename_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
return is_used
return is_used
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: Tuple = []
UpperCamelCase_: Optional[Any] = fairseq_model.state_dict()
UpperCamelCase_: int = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase_: Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
UpperCamelCase_: Union[str, Any] = True
else:
UpperCamelCase_: List[str] = load_wavaveca_layer(lowerCamelCase , lowerCamelCase , lowerCamelCase )
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
UpperCamelCase_: List[Any] = full_name.split("""conv_layers.""" )[-1]
UpperCamelCase_: Optional[Any] = name.split(""".""" )
UpperCamelCase_: Dict = int(items[0] )
UpperCamelCase_: Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase_: Optional[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase_: Dict = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase_: Optional[Any] = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase_: Any = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase )
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=False ) -> int:
if config_path is not None:
UpperCamelCase_: str = WavaVecaConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: Optional[int] = WavaVecaConfig()
if is_seq_class:
UpperCamelCase_: Optional[Any] = read_txt_into_dict(lowerCamelCase )
UpperCamelCase_: List[Any] = idalabel
UpperCamelCase_: int = WavaVecaForSequenceClassification(lowerCamelCase )
UpperCamelCase_: Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase , return_attention_mask=lowerCamelCase , )
feature_extractor.save_pretrained(lowerCamelCase )
elif is_finetuned:
if dict_path:
UpperCamelCase_: str = Dictionary.load(lowerCamelCase )
            # important: change the bos & pad token ids, since the CTC symbol is
            # <pad> and not <s> as in fairseq
UpperCamelCase_: int = target_dict.pad_index
UpperCamelCase_: Tuple = target_dict.bos_index
UpperCamelCase_: str = target_dict.eos_index
UpperCamelCase_: Any = len(target_dict.symbols )
UpperCamelCase_: int = os.path.join(lowerCamelCase , """vocab.json""" )
if not os.path.isdir(lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase ) )
return
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
UpperCamelCase_: Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase_: Union[str, Any] = 0
UpperCamelCase_: List[str] = 1
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Tuple = WavaVecaCTCTokenizer(
lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase , )
UpperCamelCase_: List[str] = True if config.feat_extract_norm == """layer""" else False
UpperCamelCase_: Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase , return_attention_mask=lowerCamelCase , )
UpperCamelCase_: Optional[Any] = WavaVecaProcessor(feature_extractor=lowerCamelCase , tokenizer=lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
UpperCamelCase_: Optional[Any] = WavaVecaForCTC(lowerCamelCase )
else:
UpperCamelCase_: Optional[Any] = WavaVecaForPreTraining(lowerCamelCase )
if is_finetuned or is_seq_class:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
UpperCamelCase_: Any = argparse.Namespace(task="""audio_pretraining""" )
UpperCamelCase_: Optional[int] = fairseq.tasks.setup_task(lowerCamelCase )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase )
UpperCamelCase_: Tuple = model[0].eval()
recursively_load_weights(lowerCamelCase , lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
lowerCamelCase_ : Optional[Any] = parser.parse_args()
lowerCamelCase_ : Optional[int] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
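    # Illustrative invocation (not part of the original script); the script
    # name and paths below are placeholders:
    #   python convert_wav2vec2_checkpoint.py \
    #       --checkpoint_path /path/to/wav2vec_small.pt \
    #       --dict_path /path/to/dict.ltr.txt \
    #       --pytorch_dump_folder_path ./wav2vec2-base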
| 701 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ) -> None:
if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
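# Illustrative invocation (sketch; the script filename and file paths are placeholders,
# only the argparse flags above come from the source):
# python convert_hifigan.py \
#     --checkpoint_path generator.ckpt.pt \
#     --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan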
| 670 | 0 |
from __future__ import annotations
def depth_first_search( possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen with the same value, because if there
        # is it means there is a collision in the vertical. Then we apply the two
        # formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n: int ) -> None:
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print("""""" )
    print(len(boards ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
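# A compact cross-check of the DFS above (sketch, not part of the original file):
# every permutation places exactly one queen per row and per column, so only the two
# diagonal constraints still need to be verified.
from itertools import permutations
def count_n_queens_solutions(n: int) -> int:
    count = 0
    for perm in permutations(range(n)):
        # both diagonal sets must hold n distinct values: no two queens share a diagonal
        if len({perm[i] - i for i in range(n)}) == n and len({perm[i] + i for i in range(n)}) == n:
            count += 1
    return count
# count_n_queens_solutions(4) == 2, matching the two boards printed by n_queens_solution(4)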
| 702 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = XGLMConfig
__UpperCamelCase : int = {}
__UpperCamelCase : str = """gelu"""
def __init__( self : Tuple , snake_case_ : int , snake_case_ : str=14 , snake_case_ : Tuple=7 , snake_case_ : Optional[int]=True , snake_case_ : Optional[int]=True , snake_case_ : List[str]=True , snake_case_ : int=99 , snake_case_ : Optional[int]=32 , snake_case_ : Any=2 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=37 , snake_case_ : List[Any]="gelu" , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Any=0.1 , snake_case_ : Optional[int]=512 , snake_case_ : Tuple=0.02 , ):
UpperCamelCase_: str = parent
UpperCamelCase_: Tuple = batch_size
UpperCamelCase_: Optional[int] = seq_length
UpperCamelCase_: List[Any] = is_training
UpperCamelCase_: str = use_input_mask
UpperCamelCase_: Optional[Any] = use_labels
UpperCamelCase_: List[str] = vocab_size
UpperCamelCase_: Optional[int] = d_model
UpperCamelCase_: str = num_hidden_layers
UpperCamelCase_: Any = num_attention_heads
UpperCamelCase_: Dict = ffn_dim
UpperCamelCase_: Any = activation_function
UpperCamelCase_: Optional[Any] = activation_dropout
UpperCamelCase_: Dict = attention_dropout
UpperCamelCase_: int = max_position_embeddings
UpperCamelCase_: Union[str, Any] = initializer_range
UpperCamelCase_: List[Any] = None
UpperCamelCase_: Optional[int] = 0
UpperCamelCase_: str = 2
UpperCamelCase_: List[str] = 1
def lowerCAmelCase__ ( self : List[Any] ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: List[str] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase_: Union[str, Any] = None
if self.use_input_mask:
UpperCamelCase_: Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_: Optional[Any] = self.get_config()
UpperCamelCase_: Tuple = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCAmelCase__ ( self : List[str] ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=snake_case_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=snake_case_ , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_tf
class _UpperCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[str] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCamelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCamelCase : List[Any] = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCamelCase : Any = False
__UpperCamelCase : int = False
__UpperCamelCase : List[str] = False
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Optional[int] = TFXGLMModelTester(self )
UpperCamelCase_: Any = ConfigTester(self , config_class=snake_case_ , n_embd=37 )
def lowerCAmelCase__ ( self : List[str] ):
self.config_tester.run_common_tests()
@slow
def lowerCAmelCase__ ( self : List[Any] ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_: Tuple = TFXGLMModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def lowerCAmelCase__ ( self : Dict ):
super().test_resize_token_embeddings()
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Optional[Any]=True ):
UpperCamelCase_: Union[str, Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase_: Dict = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase_: int = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
UpperCamelCase_: Dict = model.generate(snake_case_ , do_sample=snake_case_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , snake_case_ )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase_: str = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
UpperCamelCase_: int = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
UpperCamelCase_: List[Any] = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
UpperCamelCase_: Tuple = model.generate(snake_case_ , do_sample=snake_case_ , seed=[7, 0] )
UpperCamelCase_: Union[str, Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case_ )
UpperCamelCase_: int = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: List[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase_: Union[str, Any] = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
        tokenizer.padding_side = """left"""
# use different length sentences to test batching
UpperCamelCase_: List[Any] = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
UpperCamelCase_: Dict = tokenizer(snake_case_ , return_tensors="""tf""" , padding=snake_case_ )
UpperCamelCase_: int = inputs['input_ids']
UpperCamelCase_: Dict = model.generate(input_ids=snake_case_ , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
UpperCamelCase_: Union[str, Any] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
UpperCamelCase_: int = model.generate(input_ids=snake_case_ , max_new_tokens=12 )
UpperCamelCase_: Tuple = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
UpperCamelCase_: Any = model.generate(input_ids=snake_case_ , max_new_tokens=12 )
UpperCamelCase_: List[str] = tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case_ )
UpperCamelCase_: Any = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , [non_padded_sentence, padded_sentence] )
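# Sketch of the left-padding setup the batched-generation test above relies on
# (illustrative; mirrors standard transformers usage with the same model id):
# tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# tokenizer.padding_side = "left"  # pad on the left so every row ends at its prompt's last token
# batch = tokenizer(sentences, return_tensors="tf", padding=True)
# outputs = model.generate(input_ids=batch.input_ids, attention_mask=batch.attention_mask)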
| 703 |
import cv2
import numpy as np
class HarrisCorner:
    '''simple docstring'''
    def __init__( self , k: float , window_size: int ):
        # k is the empirically determined Harris constant, usually 0.04 or 0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
    def __str__( self ):
        return str(self.k )
    def detect( self , img_path: str ):
        img = cv2.imread(img_path , 0 )
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("""path_to_image""")
    cv2.imwrite("""detect.png""", color_img)
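# Note (added for clarity): the response computed above is the standard Harris measure
#     R = det(M) - k * trace(M)**2,  with M = [[Sxx, Sxy], [Sxy, Syy]]
# where Sxx, Syy, Sxy are the windowed sums wxx, wyy, wxy. Pixels whose response
# exceeds the threshold (0.5 here) are collected and marked red in the color image.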
| 670 | 0 |
import numpy as np
lowerCamelCase_ : Optional[int] = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class BifidCipher:
    '''simple docstring'''
    def __init__( self ):
        self.SQUARE = np.array(SQUARE )
    def letter_to_numbers( self , letter: str ):
        # position of the letter in the polybius square, 1-indexed
        index1, index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes
    def numbers_to_letter( self , index1: int , index2: int ):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode( self , message: str ):
        message = message.lower()
        message = message.replace(""" """ , """""" )
        message = message.replace("""j""" , """i""" )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = """"""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 , index2 )
            encoded_message = encoded_message + letter
        return encoded_message
    def decode( self , message: str ):
        message = message.lower()
        message = message.replace(""" """ , """""" )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = """"""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 , index2 )
            decoded_message = decoded_message + letter
        return decoded_message
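# Usage sketch (illustrative): this matches the classic bifid construction over a 5x5
# polybius square, so "j" is folded into "i" and spaces are dropped before encoding.
# cipher = BifidCipher()
# encoded = cipher.encode("testmessage")
# assert cipher.decode(encoded) == "testmessage"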
| 704 |
import random
def random_graph( vertices_number: int , probability: float , directed: bool = False ) -> dict:
    graph: dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i )
    return graph
def complete_graph( vertices_number: int ) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
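# Usage sketch: with a fixed seed the output is reproducible, e.g.
# random.seed(1)
# print(random_graph(4, 0.5))        # undirected adjacency lists
# print(random_graph(4, 0.5, True))  # directed variant: edges only from i to j
# print(complete_graph(3))           # {0: [1, 2], 1: [0, 2], 2: [0, 1]}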
| 670 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = """adapt react readapt apt"""
        output_text = """adapt react readapt apt"""
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """adapt react readapt apt"""
        bpe_tokens = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
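# How the expected BPE split above comes about (explanatory note, not from the source):
# with merges "a p", "ap t</w>", "r e", "a d", "ad apt</w>", the word "react" only hits
# the "r e" merge and splits into the continuation-marked pieces "re@@ a@@ c@@ t",
# while "adapt" and "readapt" reach the full "adapt" merge ("readapt" -> "re@@ adapt").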
| 705 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
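# Quick-reference sketch of the public API exercised by the tests above (all of these
# helpers are imported at the top of this file or live in transformers.utils.logging):
# from transformers.utils import logging
# logging.set_verbosity_info()
# logger = logging.get_logger(__name__)
# logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")
# logging.disable_progress_bar(); logging.enable_progress_bar()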
| 670 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Optional[Any] = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig( PretrainedConfig ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = '''bart'''
__UpperCamelCase : Optional[int] = ['''past_key_values''']
__UpperCamelCase : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[Any] , snake_case_ : Union[str, Any]=5_0265 , snake_case_ : int=1024 , snake_case_ : Optional[Any]=12 , snake_case_ : Dict=4096 , snake_case_ : Dict=16 , snake_case_ : int=12 , snake_case_ : Optional[Any]=4096 , snake_case_ : Optional[Any]=16 , snake_case_ : Tuple=0.0 , snake_case_ : Dict=0.0 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Dict=1024 , snake_case_ : Any=0.1 , snake_case_ : List[Any]=0.0 , snake_case_ : str=0.0 , snake_case_ : Optional[int]=0.02 , snake_case_ : Optional[int]=0.0 , snake_case_ : List[str]=False , snake_case_ : Tuple=True , snake_case_ : List[Any]=3 , snake_case_ : str=1 , snake_case_ : Tuple=0 , snake_case_ : Union[str, Any]=2 , snake_case_ : Optional[int]=True , snake_case_ : Optional[Any]=2 , snake_case_ : List[str]=2 , **snake_case_ : Tuple , ):
UpperCamelCase_: Optional[Any] = vocab_size
UpperCamelCase_: Any = max_position_embeddings
UpperCamelCase_: Dict = d_model
UpperCamelCase_: Optional[int] = encoder_ffn_dim
UpperCamelCase_: Union[str, Any] = encoder_layers
UpperCamelCase_: List[Any] = encoder_attention_heads
UpperCamelCase_: Union[str, Any] = decoder_ffn_dim
UpperCamelCase_: List[Any] = decoder_layers
UpperCamelCase_: Any = decoder_attention_heads
UpperCamelCase_: Any = dropout
UpperCamelCase_: List[Any] = attention_dropout
UpperCamelCase_: Any = activation_dropout
UpperCamelCase_: Tuple = activation_function
UpperCamelCase_: List[str] = init_std
UpperCamelCase_: Dict = encoder_layerdrop
UpperCamelCase_: Optional[Any] = decoder_layerdrop
UpperCamelCase_: Tuple = classifier_dropout
UpperCamelCase_: Tuple = use_cache
UpperCamelCase_: Union[str, Any] = encoder_layers
UpperCamelCase_: int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , snake_case__ ):
UpperCamelCase_: str = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
class BartOnnxConfig( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : List[str] ):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_: Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCamelCase_: Union[str, Any] = {0: """batch"""}
UpperCamelCase_: List[str] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCamelCase_: List[Any] = {0: """batch""", 1: """decoder_sequence"""}
UpperCamelCase_: int = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCamelCase_: int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCamelCase_, UpperCamelCase_: Optional[Any] = self.num_layers
for i in range(snake_case__ ):
UpperCamelCase_: Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCamelCase_: Any = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCamelCase_: List[str] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def lowerCAmelCase__ ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_: Optional[Any] = super().outputs
else:
UpperCamelCase_: List[Any] = super(snake_case__ , self ).outputs
if self.use_past:
UpperCamelCase_, UpperCamelCase_: Any = self.num_layers
for i in range(snake_case__ ):
UpperCamelCase_: Dict = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCamelCase_: List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , ):
UpperCamelCase_: int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Generate decoder inputs
UpperCamelCase_: Tuple = seq_length if not self.use_past else 1
UpperCamelCase_: int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
UpperCamelCase_: Any = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
UpperCamelCase_: List[str] = dict(**snake_case__ , **snake_case__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCamelCase_, UpperCamelCase_: Dict = common_inputs["""input_ids"""].shape
UpperCamelCase_: str = common_inputs["""decoder_input_ids"""].shape[1]
UpperCamelCase_, UpperCamelCase_: List[Any] = self.num_attention_heads
UpperCamelCase_: str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase_: List[Any] = decoder_seq_length + 3
UpperCamelCase_: List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCamelCase_: Union[str, Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(snake_case__ , snake_case__ )] , dim=1 )
UpperCamelCase_: Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCamelCase_, UpperCamelCase_: Union[str, Any] = self.num_layers
UpperCamelCase_: Tuple = min(snake_case__ , snake_case__ )
UpperCamelCase_: Any = max(snake_case__ , snake_case__ ) - min_num_layers
UpperCamelCase_: Tuple = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(snake_case__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
) )
# TODO: test this.
UpperCamelCase_: int = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(snake_case__ , snake_case__ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) )
return common_inputs
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , ):
UpperCamelCase_: str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCamelCase_, UpperCamelCase_: Union[str, Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCamelCase_: Optional[Any] = seqlen + 2
UpperCamelCase_, UpperCamelCase_: Optional[Any] = self.num_layers
UpperCamelCase_, UpperCamelCase_: Tuple = self.num_attention_heads
UpperCamelCase_: Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase_: Tuple = common_inputs["""attention_mask"""].dtype
UpperCamelCase_: Union[str, Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
UpperCamelCase_: Any = [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(snake_case__ )
]
return common_inputs
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase_: List[Any] = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase_: str = tokenizer.num_special_tokens_to_add(snake_case__ )
UpperCamelCase_: str = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase_: str = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCamelCase_: int = dict(tokenizer(snake_case__ , return_tensors=snake_case__ ) )
return common_inputs
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_: Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
elif self.task == "causal-lm":
UpperCamelCase_: Tuple = self._generate_dummy_inputs_for_causal_lm(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
else:
UpperCamelCase_: List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
return common_inputs
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : int ):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_: Any = super()._flatten_past_key_values_(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
UpperCamelCase_: Optional[Any] = super(snake_case__ , self )._flatten_past_key_values_(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
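# Hedged usage sketch (illustrative, not part of the source file): in the upstream
# transformers version of this class the last entry point above is named
# `generate_dummy_inputs`; the checkpoint id below is only an example.
# from transformers import BartConfig, BartTokenizer
# onnx_config = BartOnnxConfig(BartConfig(), task="default")
# dummy_inputs = onnx_config.generate_dummy_inputs(
#     BartTokenizer.from_pretrained("facebook/bart-base"), framework=TensorType.PYTORCH
# )
# print(list(dummy_inputs))  # input_ids, attention_mask, decoder_input_ids, ...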
| 706 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """encoder-decoder"""
    is_composition = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ):
        logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
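# Usage sketch (mirrors the class above; BertConfig is just an example encoder/decoder):
# from transformers import BertConfig, EncoderDecoderConfig
# enc, dec = BertConfig(), BertConfig()
# config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
# assert config.decoder.is_decoder and config.decoder.add_cross_attention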
| 707 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def test_tpu( self ):
        distributed_args = f'''
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
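# The subprocess assembled above is equivalent to running (paths illustrative):
#   python <test_dir>/xla_spawn.py --num_cores 8 <.../scripts/test_script.py>
# where xla_spawn.py launches the target script once per requested TPU core.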
| 670 | 0 |
def prefix_function( input_string: str ) -> list:
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix( input_str: str ) -> int:
    return max(prefix_function(input_str ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
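# Worked example (added for clarity): prefix_function("aabcdaabc") returns
# [0, 1, 0, 0, 0, 1, 2, 3, 4], so longest_prefix("aabcdaabc") == 4 -- the prefix
# "aabc" is the longest proper prefix that also reappears as a suffix.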
| 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
if self.framework == "pytorch":
subprocess.run(
                f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=True , )
assert hasattr(self , """env""" )
    def create_estimator( self , instance_count ):
        job_name = f'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
        # distributed data settings
        distribution = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="""py36""" , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
    def test_script( self , instance_count ):
        # create estimator
        estimator = self.create_estimator(instance_count )
# run training
estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , outfile )
| 709 |
def add( first: int , second: int ) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
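# Worked trace (added for clarity): add(5, 3)
#   carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   carry = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0  -> returns 8
# Caveat: with Python's unbounded integers this loop does not terminate for negative
# operands (there is no fixed word size to overflow), so inputs are assumed non-negative.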
| 670 | 0 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs( gen_kwargs: dict ) -> int:
    """Return the number of possible shards according to the input gen_kwargs"""
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                """Sharding is ambiguous for this dataset: """
                + """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"""
                + """\n""".join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """
                + """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards( num_shards: int , max_num_jobs: int ) -> list:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
def _split_gen_kwargs( gen_kwargs: dict , max_num_jobs: int ) -> list:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def merge_gen_kwargs( gen_kwargs_list: list ) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs( rng , gen_kwargs: dict ) -> dict:
    # lists of the same size get the same shuffling, keeping entangled lists aligned
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
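# Worked example (added for clarity): _distribute_shards(num_shards=5, max_num_jobs=2)
# yields [range(0, 3), range(3, 5)]: each group gets num_shards // max_num_jobs shards,
# and the first (num_shards % max_num_jobs) groups absorb one extra shard each.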
| 710 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
    train_file: Optional[str] = field(default=None , metadata={"""help""": """The input training data file (a text file)."""} )
    validation_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. If passed, sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            """help""": (
                """Whether to pad all samples to the maximum sentence length. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
                """efficient on GPU but very bad for TPU."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    def __post_init__( self ):
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
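        # Flatten: expand each example's `num_choices` candidate encodings into one
        # long list so the tokenizer can pad them together as a single batch.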
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 670 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Any , snake_case_ : str ):
UpperCamelCase_: str = jnp.ones((batch_size, length) ) / length
return scores
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = None
UpperCamelCase_: List[Any] = 20
UpperCamelCase_: str = self._get_uniform_logits(batch_size=2 , length=_A )
# tweak scores to not be uniform anymore
UpperCamelCase_: int = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCamelCase_: int = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCamelCase_: List[str] = jax.nn.softmax(_A , axis=-1 )
UpperCamelCase_: Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCamelCase_: Optional[Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCamelCase_: Tuple = jax.nn.softmax(temp_dist_warper_sharper(_A , scores.copy() , cur_len=_A ) , axis=-1 )
UpperCamelCase_: str = jax.nn.softmax(temp_dist_warper_smoother(_A , scores.copy() , cur_len=_A ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: List[str] = None
UpperCamelCase_: Dict = 10
UpperCamelCase_: Optional[Any] = 2
# create ramp distribution
UpperCamelCase_: Union[str, Any] = np.broadcast_to(np.arange(_A )[None, :] , (batch_size, vocab_size) ).copy()
UpperCamelCase_: Any = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCamelCase_: List[Any] = FlaxTopKLogitsWarper(3 )
UpperCamelCase_: List[Any] = top_k_warp(_A , _A , cur_len=_A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCamelCase_: List[str] = 5
UpperCamelCase_: int = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCamelCase_: List[str] = np.broadcast_to(np.arange(_A )[None, :] , (batch_size, length) ).copy()
UpperCamelCase_: Optional[int] = top_k_warp_safety_check(_A , _A , cur_len=_A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Tuple = None
UpperCamelCase_: Optional[Any] = 10
UpperCamelCase_: Optional[int] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCamelCase_: List[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
UpperCamelCase_: Any = FlaxTopPLogitsWarper(0.8 )
UpperCamelCase_: Any = np.exp(top_p_warp(_A , _A , cur_len=_A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCamelCase_: Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# check edge cases with negative and extreme logits
UpperCamelCase_: str = np.broadcast_to(np.arange(_A )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCamelCase_: Any = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
UpperCamelCase_: Optional[Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCamelCase_: int = top_p_warp(_A , _A , cur_len=_A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[Any] = 20
UpperCamelCase_: List[Any] = 4
UpperCamelCase_: Dict = 0
UpperCamelCase_: Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_A )
# check that min length is applied at length 5
UpperCamelCase_: Optional[int] = ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCamelCase_: str = 5
UpperCamelCase_: Union[str, Any] = self._get_uniform_logits(_A , _A )
UpperCamelCase_: str = min_dist_processor(_A , _A , cur_len=_A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
UpperCamelCase_: Tuple = self._get_uniform_logits(_A , _A )
UpperCamelCase_: Any = 15
UpperCamelCase_: Dict = min_dist_processor(_A , _A , cur_len=_A )
self.assertFalse(jnp.isinf(_A ).any() )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Optional[Any] = 20
UpperCamelCase_: int = 4
UpperCamelCase_: int = 0
UpperCamelCase_: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_A )
# check that all scores are -inf except the bos_token_id score
UpperCamelCase_: Optional[int] = ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCamelCase_: Tuple = 1
UpperCamelCase_: Tuple = self._get_uniform_logits(_A , _A )
UpperCamelCase_: Optional[int] = logits_processor(_A , _A , cur_len=_A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCamelCase_: str = 3
UpperCamelCase_: Optional[int] = self._get_uniform_logits(_A , _A )
UpperCamelCase_: Tuple = logits_processor(_A , _A , cur_len=_A )
self.assertFalse(jnp.isinf(_A ).any() )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = 20
UpperCamelCase_: Optional[Any] = 4
UpperCamelCase_: Dict = 0
UpperCamelCase_: Optional[Any] = 5
UpperCamelCase_: str = FlaxForcedEOSTokenLogitsProcessor(max_length=_A , eos_token_id=_A )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCamelCase_: int = ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCamelCase_: int = 4
UpperCamelCase_: Union[str, Any] = self._get_uniform_logits(_A , _A )
UpperCamelCase_: List[str] = logits_processor(_A , _A , cur_len=_A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCamelCase_: Any = 3
UpperCamelCase_: Union[str, Any] = self._get_uniform_logits(_A , _A )
UpperCamelCase_: Optional[Any] = logits_processor(_A , _A , cur_len=_A )
self.assertFalse(jnp.isinf(_A ).any() )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: int = 4
UpperCamelCase_: List[str] = 10
UpperCamelCase_: int = 15
UpperCamelCase_: Any = 2
UpperCamelCase_: Dict = 1
UpperCamelCase_: Optional[int] = 15
# dummy input_ids and scores
UpperCamelCase_: Union[str, Any] = ids_tensor((batch_size, sequence_length) , _A )
UpperCamelCase_: str = input_ids.copy()
UpperCamelCase_: Union[str, Any] = self._get_uniform_logits(_A , _A )
UpperCamelCase_: int = scores.copy()
# instantiate all dist processors
UpperCamelCase_: List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCamelCase_: Optional[int] = FlaxTopKLogitsWarper(3 )
UpperCamelCase_: Union[str, Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCamelCase_: List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_A )
UpperCamelCase_: Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_A )
UpperCamelCase_: Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_A , eos_token_id=_A )
UpperCamelCase_: List[str] = 10
# no processor list
UpperCamelCase_: Dict = temp_dist_warp(_A , _A , cur_len=_A )
UpperCamelCase_: Optional[int] = top_k_warp(_A , _A , cur_len=_A )
UpperCamelCase_: Dict = top_p_warp(_A , _A , cur_len=_A )
UpperCamelCase_: int = min_dist_proc(_A , _A , cur_len=_A )
UpperCamelCase_: List[Any] = bos_dist_proc(_A , _A , cur_len=_A )
UpperCamelCase_: Optional[int] = eos_dist_proc(_A , _A , cur_len=_A )
# with processor list
UpperCamelCase_: Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCamelCase_: List[str] = processor(_A , _A , cur_len=_A )
# scores should be equal
self.assertTrue(jnp.allclose(_A , _A , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: int = 4
UpperCamelCase_: List[Any] = 10
UpperCamelCase_: Tuple = 15
UpperCamelCase_: Union[str, Any] = 2
UpperCamelCase_: Optional[int] = 1
UpperCamelCase_: List[str] = 15
# dummy input_ids and scores
UpperCamelCase_: int = ids_tensor((batch_size, sequence_length) , _A )
UpperCamelCase_: Optional[int] = input_ids.copy()
UpperCamelCase_: Optional[Any] = self._get_uniform_logits(_A , _A )
UpperCamelCase_: List[Any] = scores.copy()
# instantiate all dist processors
UpperCamelCase_: Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCamelCase_: Optional[int] = FlaxTopKLogitsWarper(3 )
UpperCamelCase_: Tuple = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCamelCase_: Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_A )
UpperCamelCase_: Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_A )
UpperCamelCase_: Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_A , eos_token_id=_A )
UpperCamelCase_: List[str] = 10
# no processor list
def run_no_processor_list(snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Tuple ):
UpperCamelCase_: Optional[Any] = temp_dist_warp(_A , _A , cur_len=_A )
UpperCamelCase_: Optional[Any] = top_k_warp(_A , _A , cur_len=_A )
UpperCamelCase_: List[Any] = top_p_warp(_A , _A , cur_len=_A )
UpperCamelCase_: Optional[Any] = min_dist_proc(_A , _A , cur_len=_A )
UpperCamelCase_: Tuple = bos_dist_proc(_A , _A , cur_len=_A )
UpperCamelCase_: Dict = eos_dist_proc(_A , _A , cur_len=_A )
return scores
# with processor list
def run_processor_list(snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Tuple ):
UpperCamelCase_: Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCamelCase_: List[Any] = processor(_A , _A , cur_len=_A )
return scores
UpperCamelCase_: Dict = jax.jit(_A )
UpperCamelCase_: Optional[Any] = jax.jit(_A )
UpperCamelCase_: List[str] = jitted_run_no_processor_list(_A , _A , _A )
UpperCamelCase_: List[Any] = jitted_run_processor_list(_A , _A , _A )
# scores should be equal
self.assertTrue(jnp.allclose(_A , _A , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 711 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
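        # Builds a tiny dummy dataset, launches finetune_rag.py in a subprocess with the
        # requested number of GPUs and retriever backend, then returns the parsed metrics.json.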
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
        UpperCamelCase_: List[Any] = self._run_finetune(gpus=2 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 0 |
def A__ ( lowerCamelCase = 1_00_00_00 ) -> int:
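    # Project Euler 135: count the values of n below `limit` for which
    # x^2 - y^2 - z^2 = n has exactly ten solutions in positive integers
    # x > y > z forming an arithmetic progression.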
UpperCamelCase_: Union[str, Any] = limit + 1
UpperCamelCase_: int = [0] * limit
for first_term in range(1 , __snake_case ):
for n in range(__snake_case , __snake_case , __snake_case ):
UpperCamelCase_: str = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also 4d < a
UpperCamelCase_: Optional[Any] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 712 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
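        # Walk the list to the 1-indexed `position`; if the position lies past the
        # end of the list, the new node is appended at the tail instead.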
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
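# Example invocation (assuming this helper is saved as xla_spawn.py):
#   python xla_spawn.py --num_cores 8 your_training_script.py --your_script_args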
def A__ ( ) -> Any:
UpperCamelCase_: Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=lowerCamelCase_ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=lowerCamelCase_ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=lowerCamelCase_ )
return parser.parse_args()
def A__ ( ) -> Union[str, Any]:
UpperCamelCase_: int = parse_args()
# Import training_script as a module.
UpperCamelCase_: Tuple = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCamelCase_: Optional[Any] = script_fpath.stem
UpperCamelCase_: int = importlib.import_module(lowerCamelCase_ )
# Patch sys.argv
UpperCamelCase_: str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 713 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase_ : str = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 714 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = IFInpaintingPipeline
__UpperCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__UpperCamelCase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowerCAmelCase__ ( self : Tuple ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self : str , snake_case_ : Dict , snake_case_ : List[Any]=0 ):
if str(snake_case_ ).startswith("""mps""" ):
UpperCamelCase_: Tuple = torch.manual_seed(snake_case_ )
else:
UpperCamelCase_: Dict = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase_: Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
UpperCamelCase_: List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
UpperCamelCase_: Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCAmelCase__ ( self : Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase__ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase__ ( self : Optional[int] ):
self._test_save_load_local()
def lowerCAmelCase__ ( self : List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 715 |
def A__ ( lowerCamelCase = 50 ) -> int:
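    # Project Euler 116: a row of `length` black tiles has runs replaced by coloured
    # oblongs of length 2 (red), 3 (green) or 4 (blue), never mixing colours; count
    # the total number of non-empty replacements across the three colours.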
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 0 |
def A__ ( lowerCamelCase ) -> str:
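    # Returns the two's-complement binary representation of a non-positive integer
    # as a string, e.g. -5 -> "0b1011" and 0 -> "0b0".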
if number > 0:
raise ValueError("""input must be a negative integer""" )
UpperCamelCase_: List[str] = len(bin(a__ )[3:] )
UpperCamelCase_: List[str] = bin(abs(a__ ) - (1 << binary_number_length) )[3:]
UpperCamelCase_: Dict = (
(
"""1"""
+ """0""" * (binary_number_length - len(a__ ))
+ twos_complement_number
)
if number < 0
else """0"""
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 670 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCamelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : str , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : Optional[Any] = False , ):
super().__init__()
UpperCamelCase_: List[str] = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCamelCase_: Optional[Any] = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCamelCase_: str = False
UpperCamelCase_: List[str] = nn.Dropout(p=lowerCAmelCase_ )
UpperCamelCase_: int = TaConfig(
vocab_size=lowerCAmelCase_ , d_model=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , feed_forward_proj=lowerCAmelCase_ , is_decoder=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , )
UpperCamelCase_: Dict = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
UpperCamelCase_: Any = TaBlock(lowerCAmelCase_ )
self.encoders.append(lowerCAmelCase_ )
UpperCamelCase_: List[Any] = TaLayerNorm(lowerCAmelCase_ )
UpperCamelCase_: Tuple = nn.Dropout(p=lowerCAmelCase_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
UpperCamelCase_: Tuple = self.token_embedder(lowerCAmelCase_ )
UpperCamelCase_: List[str] = encoder_input_tokens.shape[1]
UpperCamelCase_: List[Any] = torch.arange(lowerCAmelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase_ )
UpperCamelCase_: Union[str, Any] = self.dropout_pre(lowerCAmelCase_ )
        # invert the attention mask
UpperCamelCase_: str = encoder_input_tokens.size()
UpperCamelCase_: str = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ )
for lyr in self.encoders:
UpperCamelCase_: str = lyr(lowerCAmelCase_ , lowerCAmelCase_ )[0]
UpperCamelCase_: Optional[Any] = self.layer_norm(lowerCAmelCase_ )
return self.dropout_post(lowerCAmelCase_ ), encoder_inputs_mask
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[str] = ['image_processor', 'tokenizer']
__UpperCamelCase : Any = 'CLIPImageProcessor'
__UpperCamelCase : Optional[int] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Union[str, Any] , snake_case_ : str=None , snake_case_ : Union[str, Any]=None , **snake_case_ : Optional[int] ):
UpperCamelCase_: Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCAmelCase__ , )
UpperCamelCase_: List[Any] = kwargs.pop("""feature_extractor""" )
UpperCamelCase_: Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
def __call__( self : Any , snake_case_ : Dict=None , snake_case_ : Union[str, Any]=None , snake_case_ : int=None , **snake_case_ : Union[str, Any] ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCamelCase_: int = self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
if images is not None:
UpperCamelCase_: Dict = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
if text is not None and images is not None:
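            # attach the image pixel values to the text encoding before returning it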
UpperCamelCase_: Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase__ ) , tensor_type=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : List[str] , *snake_case_ : Dict , **snake_case_ : int ):
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , *snake_case_ : Optional[Any] , **snake_case_ : str ):
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.tokenizer.model_input_names
UpperCamelCase_: Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase__ ( self : Tuple ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase__ , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self : Any ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase__ , )
return self.image_processor
| 718 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
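    # Modified Newton-Raphson step: x_{n+1} = x_n - m * f(x_n) / f'(x_n), where the
    # multiplicity m restores quadratic convergence for a root repeated m times.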
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 0 |
from __future__ import annotations
def kmp( pattern , text ) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern ) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
lowerCamelCase_ : Any = """abc1abc12"""
lowerCamelCase_ : int = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCamelCase_ : Dict = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowerCamelCase_ : str = """ABABX"""
lowerCamelCase_ : List[str] = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
lowerCamelCase_ : Dict = """AAAB"""
lowerCamelCase_ : int = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
lowerCamelCase_ : Optional[int] = """abcdabcy"""
lowerCamelCase_ : str = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
lowerCamelCase_ : Tuple = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
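    # Test 6) Worked example: failure[i] is the length of the longest proper
    # prefix of pattern[: i + 1] that is also a suffix of it:
    #   "A" -> 0, "AB" -> 0, "ABA" -> 1 ("A"), "ABAB" -> 2 ("AB"), "ABABX" -> 0
    pattern = "ABABX"
    assert get_failure_array(pattern) == [0, 0, 1, 2, 0]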
| 719 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
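# How the lazy structure above behaves in practice: importing the package is
# cheap, and _LazyModule.__getattr__ performs the real submodule import on
# first attribute access. A hedged illustration (the torch-backed names would
# additionally require torch to be installed):
#
#     import transformers.models.distilbert as distilbert
#     config_cls = distilbert.DistilBertConfig        # triggers .configuration_distilbert
#     tokenizer_cls = distilbert.DistilBertTokenizer  # triggers .tokenization_distilbert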
| 670 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class OpenLlamaConfig( PretrainedConfig ):
'''simple docstring'''
__UpperCamelCase : str = """open-llama"""
    def __init__( self : Optional[Any] , vocab_size=10_0000 , hidden_size=4096 , intermediate_size=1_1008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            """use_memorry_efficient_attention""" , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self : Any ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("""type""" , None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
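# Hedged illustration of what _rope_scaling_validation accepts and rejects
# (values are illustrative):
if __name__ == "__main__":
    OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
    for bad in (
        {"type": "linear"},                    # not exactly two fields
        {"type": "other", "factor": 2.0},      # unknown scaling type
        {"type": "linear", "factor": 1.0},     # factor must be a float > 1
    ):
        try:
            OpenLlamaConfig(rope_scaling=bad)
        except ValueError as err:
            print(err)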
| 720 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
        mem = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        cpu_left_col_base = [mem.copy() for i in range(6 )]
        cpu_right_col_base = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
        cpu = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
        gpu = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
        model = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
        step_a = MarkupText(
            f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
        key = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
        key_text = MarkupText(
            f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
        first_animations = []
        second_animations = []
        cpu_targs = []
for i, rect in enumerate(snake_case_ ):
            cpu_target = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
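# A minimal, self-contained sketch of the Manim building blocks used above
# (render with: manim -ql <this_file>.py MinimalMemoryScene):
class MinimalMemoryScene(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        cells = VGroup(*[mem.copy() for _ in range(6)]).arrange(RIGHT, buff=0)
        label = Text("CPU", font_size=24).next_to(cells, UP)
        self.play(Create(cells), Write(label))
        self.wait()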
| 670 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
lowerCamelCase_ : Tuple = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class PixaStructTextConfig( PretrainedConfig ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = """pix2struct_text_model"""
__UpperCamelCase : Optional[Any] = ["""past_key_values"""]
__UpperCamelCase : List[Any] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__( self : Optional[Any] , vocab_size=5_0244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
@classmethod
    def lowerCAmelCase__ ( cls : Tuple , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructVisionConfig( PretrainedConfig ):
'''simple docstring'''
__UpperCamelCase : List[str] = """pix2struct_vision_model"""
    def __init__( self : Optional[Any] , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1e-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1e-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
@classmethod
    def lowerCAmelCase__ ( cls : Union[str, Any] , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructConfig( PretrainedConfig ):
'''simple docstring'''
__UpperCamelCase : List[Any] = """pix2struct"""
__UpperCamelCase : str = True
    def __init__( self : Any , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
        self.text_config = PixaStructTextConfig(**text_config )
        self.vision_config = PixaStructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
@classmethod
    def lowerCAmelCase__ ( cls : Optional[Any] , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
def lowerCAmelCase__ ( self : str ):
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
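# Hedged usage of the composite config above: sub-configs may be given as
# plain dicts, with missing fields falling back to the defaults (values are
# illustrative):
if __name__ == "__main__":
    cfg = PixaStructConfig(
        text_config={"num_layers": 2, "num_heads": 2, "hidden_size": 64},
        vision_config={"num_hidden_layers": 2, "num_attention_heads": 2, "hidden_size": 64},
    )
    assert cfg.text_config.num_layers == 2
    assert cfg.vision_config.hidden_size == 64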
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def lowerCAmelCase__ ( self : Optional[Any] ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def lowerCAmelCase__ ( self : Tuple ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors="""np""" )
        input_processor = processor(audios=raw_speech , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = """This is a test string"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def lowerCAmelCase__ ( self : List[str] ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            # the tokenizer contributes the first two input names; the remainder must match the feature extractor
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
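# The round-trip contract the tests above pin down, as a standalone snippet
# (downloads the checkpoint named in setUp):
if __name__ == "__main__":
    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    processor.save_pretrained("./clap_processor")
    reloaded = ClapProcessor.from_pretrained("./clap_processor")
    assert processor.tokenizer.get_vocab() == reloaded.tokenizer.get_vocab()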
| 670 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , snake_case_ : int , snake_case_ : Optional[int]=13 , snake_case_ : Union[str, Any]=7 , snake_case_ : str=True , snake_case_ : int=True , snake_case_ : Optional[Any]=True , snake_case_ : List[Any]=99 , snake_case_ : Union[str, Any]=32 , snake_case_ : Union[str, Any]=5 , snake_case_ : Union[str, Any]=4 , snake_case_ : Tuple=37 , snake_case_ : List[Any]="gelu" , snake_case_ : Any=0.1 , snake_case_ : Dict=0.1 , snake_case_ : Union[str, Any]=512 , snake_case_ : List[Any]=16 , snake_case_ : List[Any]=2 , snake_case_ : str=0.02 , snake_case_ : str=3 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=None , ):
UpperCamelCase_: List[str] = parent
UpperCamelCase_: Tuple = batch_size
UpperCamelCase_: Any = seq_length
UpperCamelCase_: str = is_training
UpperCamelCase_: str = use_token_type_ids
UpperCamelCase_: int = use_labels
UpperCamelCase_: List[str] = vocab_size
UpperCamelCase_: List[str] = hidden_size
UpperCamelCase_: Optional[int] = num_hidden_layers
UpperCamelCase_: List[str] = num_attention_heads
UpperCamelCase_: List[Any] = intermediate_size
UpperCamelCase_: Union[str, Any] = hidden_act
UpperCamelCase_: Optional[Any] = hidden_dropout_prob
UpperCamelCase_: Optional[Any] = attention_probs_dropout_prob
UpperCamelCase_: int = max_position_embeddings
UpperCamelCase_: Optional[int] = type_vocab_size
UpperCamelCase_: Any = type_sequence_label_size
UpperCamelCase_: Tuple = initializer_range
UpperCamelCase_: Tuple = num_labels
UpperCamelCase_: Union[str, Any] = num_choices
UpperCamelCase_: Any = scope
UpperCamelCase_: Tuple = self.vocab_size - 1
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_: Union[str, Any] = None
if self.use_token_type_ids:
UpperCamelCase_: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_: str = None
UpperCamelCase_: str = None
UpperCamelCase_: Optional[int] = None
if self.use_labels:
UpperCamelCase_: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_: str = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_: Dict = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
UpperCamelCase_: Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Tuple , *snake_case_ : Dict ):
UpperCamelCase_: Optional[Any] = OpenAIGPTModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase_: List[Any] = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
UpperCamelCase_: Dict = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : str , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Any , *snake_case_ : List[Any] ):
UpperCamelCase_: Tuple = OpenAIGPTLMHeadModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase_: Tuple = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self : int , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : int , *snake_case_ : Union[str, Any] ):
UpperCamelCase_: str = OpenAIGPTDoubleHeadsModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase_: Any = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self : int , snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Dict , *snake_case_ : Any ):
UpperCamelCase_: Dict = self.num_labels
UpperCamelCase_: Union[str, Any] = OpenAIGPTForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase_: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_: Dict = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__UpperCamelCase : Any = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__UpperCamelCase : List[str] = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase__ ( self : str , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Dict ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Dict=False ):
UpperCamelCase_: str = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCamelCase_: List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ , )
UpperCamelCase_: List[Any] = inputs_dict['''labels''']
UpperCamelCase_: Optional[int] = inputs_dict['''labels''']
UpperCamelCase_: Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCAmelCase__ , )
UpperCamelCase_: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = OpenAIGPTModelTester(self )
UpperCamelCase_: str = ConfigTester(self , config_class=UpperCAmelCase__ , n_embd=37 )
def lowerCAmelCase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : Any ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_: Optional[Any] = OpenAIGPTModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCAmelCase__ ) # the president is
UpperCamelCase_: Optional[int] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCamelCase_: Optional[int] = model.generate(UpperCAmelCase__ , do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase__ )
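# What the slow integration test above exercises, as a standalone snippet
# (downloads the openai-gpt weights; greedy decoding reproduces those ids):
if __name__ == "__main__":
    from transformers import OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    input_ids = tokenizer("the president is", return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, do_sample=False)
    print(tokenizer.decode(output_ids[0]))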
| 700 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class SageMakerTrainer( Trainer ):
'''simple docstring'''
    def __init__( self : List[str] , args=None , **kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
        super().__init__(args=args , **kwargs )
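# Generic shape of the deprecation shim above, runnable in isolation
# (class names here are illustrative, not from transformers):
if __name__ == "__main__":
    class NewThing:
        def __init__(self, *args, **kwargs):
            pass

    class OldThing(NewThing):
        def __init__(self, *args, **kwargs):
            warnings.warn("`OldThing` is deprecated. Use `NewThing` instead.", FutureWarning)
            super().__init__(*args, **kwargs)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        OldThing()
    assert issubclass(caught[0].category, FutureWarning)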
| 670 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self : str ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
UpperCamelCase_: str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
UpperCamelCase_: int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = """A red cat sitting on a park bench"""
UpperCamelCase_: List[Any] = np.random.RandomState(0 )
UpperCamelCase_: Optional[int] = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCamelCase , output_type="""np""" , )
UpperCamelCase_: Optional[int] = output.images
UpperCamelCase_: Tuple = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase_: Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
UpperCamelCase_: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
UpperCamelCase_: int = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
UpperCamelCase_: List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase_: List[Any] = """A red cat sitting on a park bench"""
UpperCamelCase_: Optional[int] = np.random.RandomState(0 )
UpperCamelCase_: Union[str, Any] = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowerCamelCase , output_type="""np""" , )
UpperCamelCase_: Union[str, Any] = output.images
UpperCamelCase_: List[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase_: Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 701 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def load_weights( checkpoint , hf_model , config ) -> None:
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ) -> None:
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
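# Example invocation with the same positional order the __main__ block uses
# (all paths are placeholders, not real files):
#
#     convert_hifigan_checkpoint(
#         "generator.ckpt",       # --checkpoint_path: expects ["model"]["generator"] weights
#         "stats.npy",            # --stats_path: row 0 -> mean, row 1 -> scale
#         "./speecht5_hifigan",   # --pytorch_dump_folder_path: save_pretrained target
#         None,                   # --config_path: fall back to the default config
#         None,                   # --push_to_hub: set to "user/repo" to upload
#     )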
| 670 | 0 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--src_path""" , type=str , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
    parser.add_argument(
        """--evaluation_set""" , type=str , help="""where to store parsed evaluation_set file""" , )
    parser.add_argument(
        """--gold_data_path""" , type=str , help="""where to store parsed gold_data_path file""" , )
    args = parser.parse_args()
    with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
        args.gold_data_path , """w""" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["""question"""]
            contexts = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
            eval_file.write(question + """\n""" )
            gold_file.write("""\t""".join(contexts ) + """\n""" )
if __name__ == "__main__":
main()
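# Shape of the data main() consumes and produces (illustrative record):
#   input (--src_path, a JSON list of DPR records):
#     {"question": "who wrote the declaration of independence",
#      "positive_ctxs": [{"title": "Thomas Jefferson"}, {"title": "United States"}]}
#   output:
#     evaluation_set line:  who wrote the declaration of independence
#     gold_data_path line:  Thomas Jefferson<TAB>United States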
| 702 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Dict = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Tuple = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 703 |
import cv2 as cva
import numpy as np
class HarrisCorner:
    '''simple docstring'''
    def __init__( self : Dict , k : float , window_size : int ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
    def __str__( self : int ):
        return str(self.k )
    def detect( self : Optional[Any] , img_path : str ):
        img = cva.imread(img_path , 0 )
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: the response constant is hardcoded here and shadows self.k
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect("""path_to_image""")
    cva.imwrite("""detect.png""", color_img)
| 670 | 0 |