| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82–54.1k chars) | int64 (0–699) | string (111–35.6k chars) | int64 (0–699) | int64 (0–1) |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 690 |
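The row above follows the common Hugging Face pattern of deriving unset config fields from other fields at construction time. Below is a minimal, dependency-free sketch of that defaulting logic; `MiniRwkvConfig` is a hypothetical stand-in for illustration, not the real `PretrainedConfig` subclass.

```python
class MiniRwkvConfig:
    """Illustrates derived defaults: unset sizes fall back to hidden_size."""

    def __init__(self, hidden_size=4096, attention_hidden_size=None, intermediate_size=None):
        self.hidden_size = hidden_size
        # The attention width defaults to the model width when not given.
        self.attention_hidden_size = (
            attention_hidden_size if attention_hidden_size is not None else hidden_size
        )
        # Conventional 4x expansion for the feed-forward block.
        self.intermediate_size = (
            intermediate_size if intermediate_size is not None else 4 * hidden_size
        )


config = MiniRwkvConfig()
assert config.attention_hidden_size == 4096
assert config.intermediate_size == 16384
```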
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """convbert"""
def __init__( self : Optional[int] , _lowercase : List[Any]=30_522 , _lowercase : List[str]=768 , _lowercase : Optional[Any]=12 , _lowercase : Any=12 , _lowercase : str=3_072 , _lowercase : List[str]="gelu" , _lowercase : Dict=0.1 , _lowercase : Dict=0.1 , _lowercase : Any=512 , _lowercase : List[str]=2 , _lowercase : Tuple=0.0_2 , _lowercase : List[Any]=1e-12 , _lowercase : List[str]=1 , _lowercase : Tuple=0 , _lowercase : Any=2 , _lowercase : Union[str, Any]=768 , _lowercase : str=2 , _lowercase : Any=9 , _lowercase : Union[str, Any]=1 , _lowercase : Dict=None , **_lowercase : Union[str, Any] , ):
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = embedding_size
A = head_ratio
A = conv_kernel_size
A = num_groups
A = classifier_dropout
class lowerCamelCase__ ( UpperCAmelCase_ ):
@property
def __a ( self : str ):
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 690 | 1 |
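The `OnnxConfig` subclass at the end of this row maps tensor axis indices to symbolic names so the ONNX exporter can mark those axes as dynamic. A standalone sketch of how that mapping is assembled (plain Python, no ONNX dependency assumed):

```python
from collections import OrderedDict


def dynamic_axes(task: str) -> OrderedDict:
    # Multiple-choice inputs carry an extra 'choice' axis between batch and sequence.
    if task == "multiple-choice":
        axis_names = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis_names = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [
            ("input_ids", axis_names),
            ("attention_mask", axis_names),
            ("token_type_ids", axis_names),
        ]
    )


print(dynamic_axes("default"))  # every input shares the same dynamic axes
```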
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 690 |
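The pipeline tests above pin randomness by seeding a `torch.Generator` per device, falling back to the global RNG on `mps`, where per-device generators have been unreliable. A condensed sketch of that seeding pattern:

```python
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # torch.manual_seed returns the (now seeded) default generator.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


gen = make_generator("cpu", seed=0)
noise = torch.randn(1, 4, 32, 32, generator=gen)  # reproducible latents
```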
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690 | 1 |
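This row re-exports utilities and gates optional ones behind availability checks (the DeepSpeed block only loads when the package is installed). A minimal sketch of that guard idiom, using the standard library rather than accelerate's internal helpers:

```python
import importlib.util


def is_available(package_name: str) -> bool:
    # Probe for the package without actually importing it.
    return importlib.util.find_spec(package_name) is not None


if is_available("deepspeed"):
    import deepspeed  # noqa: F401  (only resolved when installed)
```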
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 690 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 1 |
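A short usage sketch for the postfix evaluator above (names follow the cleaned-up definition; note that its `/` branch truncates toward zero):

```python
# "3 4 + 5 *" == (3 + 4) * 5
assert evaluate_postfix(["3", "4", "+", "5", "*"]) == 35
# Division truncates toward zero, unlike bare floor division:
assert evaluate_postfix(["-7", "2", "/"]) == -3  # Python's -7 // 2 would give -4
```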
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """xlnet"""
lowerCAmelCase = ["""mems"""]
lowerCAmelCase = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , _lowercase : Optional[Any]=32_000 , _lowercase : Dict=1_024 , _lowercase : List[Any]=24 , _lowercase : Any=16 , _lowercase : Union[str, Any]=4_096 , _lowercase : Tuple="gelu" , _lowercase : Any=True , _lowercase : Dict="bi" , _lowercase : List[Any]=0.0_2 , _lowercase : str=1e-12 , _lowercase : List[Any]=0.1 , _lowercase : Optional[int]=512 , _lowercase : Tuple=None , _lowercase : Optional[Any]=True , _lowercase : Optional[int]=False , _lowercase : Optional[Any]=False , _lowercase : Optional[Any]=-1 , _lowercase : Any=False , _lowercase : Union[str, Any]="last" , _lowercase : List[Any]=True , _lowercase : Tuple="tanh" , _lowercase : Tuple=0.1 , _lowercase : int=5 , _lowercase : int=5 , _lowercase : Optional[Any]=5 , _lowercase : Any=1 , _lowercase : str=2 , **_lowercase : Any , ):
A = vocab_size
A = d_model
A = n_layer
A = n_head
if d_model % n_head != 0:
raise ValueError(f'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
A = d_model // n_head
A = ff_activation
A = d_inner
A = untie_r
A = attn_type
A = initializer_range
A = layer_norm_eps
A = dropout
A = mem_len
A = reuse_len
A = bi_data
A = clamp_len
A = same_length
A = summary_type
A = summary_use_proj
A = summary_activation
A = summary_last_dropout
A = start_n_top
A = end_n_top
A = bos_token_id
A = pad_token_id
A = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' , _lowercase , )
A = kwargs['use_cache']
A = use_mems_eval
A = use_mems_train
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
@property
def __a ( self : Optional[Any] ):
logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def __a ( self : List[Any] , _lowercase : List[Any] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 690 |
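The XLNet config above checks that the model width divides evenly across attention heads and that an explicitly passed `d_head` agrees with the derived value. A standalone sketch of that validation:

```python
def validate_head_dim(d_model: int, n_head: int, d_head: int | None = None) -> int:
    if d_model % n_head != 0:
        raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
    derived = d_model // n_head
    if d_head is not None and d_head != derived:
        raise ValueError(
            f"`d_head` ({d_head}) should be equal to `d_model // n_head` ({derived})"
        )
    return derived


assert validate_head_dim(1024, 16) == 64
```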
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase : Any = None
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Optional[int] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCamelCase : str = "▁"
# Segments (not really needed)
UpperCamelCase : str = 0
UpperCamelCase : int = 1
UpperCamelCase : List[Any] = 2
UpperCamelCase : Union[str, Any] = 3
UpperCamelCase : Optional[Any] = 4
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = """left"""
lowerCAmelCase = XLNetTokenizer
def __init__( self : Tuple , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : int=False , _lowercase : Tuple=True , _lowercase : Union[str, Any]=False , _lowercase : int="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<unk>" , _lowercase : Optional[int]="<sep>" , _lowercase : int="<pad>" , _lowercase : Dict="<cls>" , _lowercase : str="<mask>" , _lowercase : List[str]=["<eop>", "<eod>"] , **_lowercase : Any , ):
# Mask token behaves like a normal word, i.e. includes the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
A = 3
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 690 | 1 |
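Unlike BERT-style tokenizers, the XLNet tokenizer in this row appends the separator and classifier tokens at the end of the sequence rather than the start. A minimal sketch of that layout; the ids are placeholders for illustration only:

```python
SEP_ID, CLS_ID = 4, 3  # placeholder ids, not XLNet's real vocabulary ids


def xlnet_build_inputs(ids_a: list[int], ids_b: list[int] | None = None) -> list[int]:
    # XLNet convention: tokens_a <sep> [tokens_b <sep>] <cls>
    if ids_b is None:
        return ids_a + [SEP_ID] + [CLS_ID]
    return ids_a + [SEP_ID] + ids_b + [SEP_ID] + [CLS_ID]


assert xlnet_build_inputs([10, 11]) == [10, 11, 4, 3]
```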
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class lowerCamelCase__ ( UpperCAmelCase_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCAmelCase = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCAmelCase = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} )
lowerCAmelCase = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string""" ),
"""answer_start""": Value("""int32""" ),
} )
} )
lowerCAmelCase = "question"
lowerCAmelCase = "context"
lowerCAmelCase = "answers"
@property
def __a ( self : Union[str, Any] ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 690 |
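A stripped-down sketch of the frozen task-template pattern above, without the `datasets` dependency; the field names mirror the row, everything else is illustrative:

```python
from dataclasses import dataclass, field


@dataclass(frozen=True)
class MiniQATemplate:
    task: str = field(default="question-answering-extractive")
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> dict:
        return {
            self.question_column: "question",
            self.context_column: "context",
            self.answers_column: "answers",
        }


print(MiniQATemplate().column_mapping)
```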
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0s are free path whereas 1s are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y, x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 690 | 1 |
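The demo heuristic above is Manhattan distance to the goal, with a large penalty on obstacle cells that steers the search away from walls while leaving free cells admissible. A compact check of the free-cell case:

```python
def manhattan(i: int, j: int, goal: list) -> int:
    return abs(i - goal[0]) + abs(j - goal[1])


# top-left corner of the 5x6 demo grid, goal at [4, 5]
assert manhattan(0, 0, [4, 5]) == 9
```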
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = "▁"
UpperCamelCase : Any = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
UpperCamelCase : List[str] = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
UpperCamelCase : Any = {"vinai/bartpho-syllable": 1_024}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _lowercase : List[Any] , _lowercase : Dict , _lowercase : Optional[Any]="<s>" , _lowercase : int="</s>" , _lowercase : Optional[Any]="</s>" , _lowercase : int="<s>" , _lowercase : int="<unk>" , _lowercase : Dict="<pad>" , _lowercase : List[Any]="<mask>" , _lowercase : Optional[Dict[str, Any]] = None , **_lowercase : Dict , ):
# Mask token behaves like a normal word, i.e. includes the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
A = vocab_file
A = monolingual_vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowercase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
A = {}
A = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_lowercase ) not in self.fairseq_tokens_to_ids:
A = cnt
cnt += 1
with open(_lowercase , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
A = line.strip().split()[0]
A = len(self.fairseq_tokens_to_ids )
if str(_lowercase ) not in self.fairseq_tokens_to_ids:
A = len(self.fairseq_tokens_to_ids )
A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Optional[Any] ):
A = self.__dict__.copy()
A = None
A = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , _lowercase : Union[str, Any] ):
A = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def __a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self : Union[str, Any] ):
return len(self.fairseq_ids_to_tokens )
def __a ( self : int ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self : str , _lowercase : str ):
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def __a ( self : Dict , _lowercase : List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __a ( self : int , _lowercase : Any ):
return self.fairseq_ids_to_tokens[index]
def __a ( self : Any , _lowercase : Dict ):
A = ''.join(_lowercase ).replace(_lowercase , ' ' ).strip()
return out_string
def __a ( self : List[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , 'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_lowercase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _lowercase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_lowercase , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(_lowercase )} \n' )
return out_vocab_file, out_monolingual_vocab_file
| 690 |
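The `__getstate__`/`__setstate__` pair in this row exists because a live `SentencePieceProcessor` is not directly picklable; the tokenizer serializes the model proto instead and reloads it on unpickle. A generic sketch of that idiom, assuming the `sentencepiece` package:

```python
import sentencepiece as spm


class ProtoBackedWrapper:
    def __init__(self, model_file: str):
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(model_file)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # drop the unpicklable handle
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
```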
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {"vocab_file": "sentencepiece.model"}
UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCamelCase : Union[str, Any] = {
"google/rembert": 256,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def __a ( self : Tuple ):
return len(self.sp_model )
def __a ( self : List[str] ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : List[str] , _lowercase : int ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ):
A = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def __a ( self : Dict , _lowercase : Tuple ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : str , _lowercase : Optional[int] ):
return self.sp_model.IdToPiece(_lowercase )
def __a ( self : Optional[int] , _lowercase : Optional[int] ):
A = self.sp_model.decode_pieces(_lowercase )
return out_string
def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 690 | 1 |
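The `get_special_tokens_mask` method above marks `[CLS]`/`[SEP]` positions with 1 and ordinary tokens with 0. A tiny sketch of the mask shape for the BERT-style layout this row uses:

```python
def special_tokens_mask(len_a: int, len_b: int | None = None) -> list[int]:
    # [CLS] tokens_a [SEP] (tokens_b [SEP])
    if len_b is None:
        return [1] + [0] * len_a + [1]
    return [1] + [0] * len_a + [1] + [0] * len_b + [1]


assert special_tokens_mask(3) == [1, 0, 0, 0, 1]
assert special_tokens_mask(2, 2) == [1, 0, 0, 1, 0, 0, 1]
```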
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
def __init__( self : Union[str, Any] , _lowercase : Tuple , _lowercase : Union[str, Any]=3 , _lowercase : int=32 , _lowercase : List[Any]=3 , _lowercase : Dict=10 , _lowercase : List[Any]=[10, 20, 30, 40] , _lowercase : Any=[1, 1, 2, 1] , _lowercase : Tuple=True , _lowercase : int=True , _lowercase : List[str]="relu" , _lowercase : Optional[int]=3 , _lowercase : Union[str, Any]=None , ):
A = parent
A = batch_size
A = image_size
A = num_channels
A = embeddings_size
A = hidden_sizes
A = depths
A = is_training
A = use_labels
A = hidden_act
A = num_labels
A = scope
A = len(_lowercase )
def __a ( self : Union[str, Any] ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.num_labels )
A = self.get_config()
return config, pixel_values, labels
def __a ( self : Dict ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Any ):
A = TFRegNetModel(config=_lowercase )
A = model(_lowercase , training=_lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : str ):
A = self.num_labels
A = TFRegNetForImageClassification(_lowercase )
A = model(_lowercase , labels=_lowercase , training=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Optional[Any] ):
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def __a ( self : int ):
A = TFRegNetModelTester(self )
A = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def __a ( self : str ):
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __a ( self : Optional[int] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __a ( self : Any ):
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __a ( self : Optional[int] ):
pass
def __a ( self : int ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowercase )
A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowercase )
def __a ( self : List[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __a ( self : Any ):
def check_hidden_states_output(_lowercase : Dict , _lowercase : Optional[int] , _lowercase : Dict ):
A = model_class(_lowercase )
A = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A = layer_type
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def __a ( self : List[Any] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : int={} ):
A = model(_lowercase , return_dict=_lowercase , **_lowercase )
A = model(_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple()
def recursive_check(_lowercase : List[str] , _lowercase : int ):
if isinstance(_lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ):
recursive_check(_lowercase , _lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowercase , _lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
) , )
recursive_check(_lowercase , _lowercase )
for model_class in self.all_model_classes:
A = model_class(_lowercase )
A = self._prepare_for_class(_lowercase , _lowercase )
A = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
A = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
A = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
A = self._prepare_for_class(_lowercase , _lowercase )
A = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'output_hidden_states': True} )
A = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
A = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'output_hidden_states': True} )
def __a ( self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def __a ( self : int ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFRegNetModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def __snake_case ( ) -> Optional[int]:
"""simple docstring"""
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : Tuple ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __a ( self : int ):
A = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=_lowercase , return_tensors='tf' )
# forward pass
A = model(**_lowercase , training=_lowercase )
# verify the logits
A = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowercase )
A = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowercase , atol=1e-4 )
| 690 |
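The equivalence test in this row walks a model's tuple output and dict output in lockstep, recursing through nested containers. A framework-free sketch of that recursion:

```python
def assert_nested_equal(tuple_obj, dict_obj):
    if isinstance(tuple_obj, (list, tuple)):
        for t, d in zip(tuple_obj, dict_obj):
            assert_nested_equal(t, d)
    elif tuple_obj is None:
        # dict outputs may carry extra optional entries; None on the tuple side is skipped
        return
    else:
        assert tuple_obj == dict_obj, f"Tuple and dict output differ: {tuple_obj} != {dict_obj}"


assert_nested_equal((1, [2, 3], None), (1, [2, 3], "extra"))
```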
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCamelCase : Any = {"mobilebert-uncased": 512}
UpperCamelCase : Any = {}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Optional[int]=True , _lowercase : int="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Any="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : Any=None , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
A = getattr(_lowercase , normalizer_state.pop('type' ) )
A = do_lower_case
A = strip_accents
A = tokenize_chinese_chars
A = normalizer_class(**_lowercase )
A = do_lower_case
def __a ( self : List[Any] , _lowercase : Tuple , _lowercase : Any=None ):
A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 690 | 1 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowercase )
try:
pickle.loads(pickle.dumps(_lowercase ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 690 |
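A compact version of the pickle round-trip assertion used in this row, without the Accelerate wrapper (plain PyTorch optimizers pickle fine; the test guards the accelerated wrapper specifically):

```python
import pickle

import torch

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

restored = pickle.loads(pickle.dumps(optimizer))  # must not raise
assert isinstance(restored, torch.optim.SGD)
```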
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """simple docstring"""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """simple docstring"""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is a starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 1 |
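A usage sketch for the Z-function row above (names follow the cleaned-up definitions):

```python
assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
assert find_pattern("abr", "abracadabra") == 2  # "abr" occurs at indices 0 and 7
```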
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
| 690 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 690 | 1 |
"""simple docstring"""
import string
def decrypt ( message ) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}' )
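# Added illustrative example: decrypt('KHOOR') prints one candidate line per key;
# the Key #3 line reads 'HELLO'.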
def main ( ) -> None:
    """simple docstring"""
    message = input('Encrypted message: ' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 690 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 690 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this one, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
UpperCamelCase : Optional[Any] = "facebook/wmt19-en-de"
UpperCamelCase : Optional[Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
UpperCamelCase : Any = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
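# Added note: the saved checkpoint round-trips the usual way, i.e.
# FSMTForConditionalGeneration.from_pretrained(mname_tiny).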
| 690 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# torch.hub-style dependency list (name assumed from the hub convention)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
| 690 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : Dict = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """realm"""
def __init__( self : Dict , _lowercase : str=30_522 , _lowercase : List[Any]=768 , _lowercase : Any=128 , _lowercase : Any=12 , _lowercase : Union[str, Any]=12 , _lowercase : Any=8 , _lowercase : str=3_072 , _lowercase : Union[str, Any]="gelu_new" , _lowercase : Any=0.1 , _lowercase : List[str]=0.1 , _lowercase : str=512 , _lowercase : int=2 , _lowercase : str=0.0_2 , _lowercase : Optional[Any]=1e-12 , _lowercase : Tuple=256 , _lowercase : Tuple=10 , _lowercase : Union[str, Any]=1e-3 , _lowercase : Optional[Any]=5 , _lowercase : Optional[Any]=320 , _lowercase : List[str]=13_353_718 , _lowercase : Dict=5_000 , _lowercase : Any=1 , _lowercase : Dict=0 , _lowercase : Any=2 , **_lowercase : List[str] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
# Common config
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = retriever_proj_size
A = num_hidden_layers
A = num_attention_heads
A = num_candidates
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = type_vocab_size
A = layer_norm_eps
# Reader config
A = span_hidden_size
A = max_span_width
A = reader_layer_norm_eps
A = reader_beam_size
A = reader_seq_len
# Retrieval config
A = num_block_records
A = searcher_beam_size
| 690 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 690 | 1 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams ( first_str , second_str ) -> bool:
    """simple docstring"""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ' , '' )
    second_str = second_str.replace(' ' , '' )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count = defaultdict(int )
    # For each character in the input strings, increment its count for
    # the first string and decrement it for the second
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
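# Added examples: check_anagrams('Silent', 'Listen') is True,
# check_anagrams('There', 'Their') is False.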
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase : Dict = input("Enter the first string ").strip()
UpperCamelCase : Optional[Any] = input("Enter the second string ").strip()
UpperCamelCase : Tuple = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 690 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def __snake_case ( UpperCamelCase__ = "laptop" ) -> DataFrame:
"""simple docstring"""
A = f'https://www.amazon.in/laptop/s?k={product}'
A = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
A = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
                product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
                product_rating = 'Not available'
try:
                product_mrp = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
                product_mrp = ''
try:
                discount = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
                discount = float('nan' )
except AttributeError:
pass
        data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = ' '
A = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
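# Added note: the call above writes "Amazon Product Data for headphones.csv" to the
# working directory; live results depend on Amazon's markup at request time.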
| 690 | 1 |
"""simple docstring"""
def binomial_coefficient ( n , r ) -> int:
    """simple docstring"""
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute the current row from the previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
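# Added check: binomial_coefficient(n=10, r=5) == 252, so the print below emits 252.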
print(binomial_coefficient(n=10, r=5))
| 690 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : int=7 , _lowercase : List[str]=3 , _lowercase : Tuple=18 , _lowercase : Dict=30 , _lowercase : Any=400 , _lowercase : int=True , _lowercase : List[Any]=None , _lowercase : Tuple=True , _lowercase : List[Any]=False , _lowercase : str=True , _lowercase : List[str]=True , _lowercase : int=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , ):
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size if size is not None else {'height': 18, 'width': 20}
A = do_thumbnail
A = do_align_axis
A = do_pad
A = do_normalize
A = image_mean
A = image_std
def __a ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
A = DonutImageProcessingTester(self )
@property
def __a ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowercase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowercase , 'do_pad' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
def __a ( self : int ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
A = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __a ( self : Any ):
pass
@is_flaky()
def __a ( self : int ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 690 | 1 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# torch.hub-style dependency list (name assumed from the hub convention)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
| 690 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        # np.float32 is assumed here for the obfuscated dtype
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
    def __len__( self ):
        return self.length
    def __getitem__( self , i ):
        return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase__ ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class lowerCamelCase__ ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders ( accelerator , batch_size = 16 ):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv' , data_files=data_files )
    label_list = datasets['train'].unique('label' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None , padding='max_length' )
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['sentence1', 'sentence2', 'label'] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
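# Added hedged usage sketch: the function name and the True/False flags above were
# recovered by assumption (the obfuscated source left them unnamed), following
# accelerate's example helpers. Typical call, given an Accelerator instance:
# train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)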
| 690 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Tuple=7 , _lowercase : Optional[int]=3 , _lowercase : Tuple=30 , _lowercase : Tuple=400 , _lowercase : List[Any]=True , _lowercase : Optional[Any]=None , _lowercase : Tuple=True , _lowercase : Any=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , _lowercase : List[str]=True , _lowercase : str=1 / 255 , _lowercase : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
A = parent
A = batch_size
A = num_channels
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
A = image_mean
A = image_std
A = do_rescale
A = rescale_factor
A = do_pad
def __a ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __a ( self : Dict , _lowercase : List[Any] , _lowercase : Tuple=False ):
if not batched:
A = image_inputs[0]
if isinstance(_lowercase , Image.Image ):
A , A = image.size
else:
A , A = image.shape[1], image.shape[2]
if w < h:
A = int(self.size['shortest_edge'] * h / w )
A = self.size['shortest_edge']
elif w > h:
A = self.size['shortest_edge']
A = int(self.size['shortest_edge'] * w / h )
else:
A = self.size['shortest_edge']
A = self.size['shortest_edge']
else:
A = []
for image in image_inputs:
A , A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A = max(_lowercase , key=lambda _lowercase : item[0] )[0]
A = max(_lowercase , key=lambda _lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = YolosImageProcessor if is_vision_available() else None
def __a ( self : Optional[Any] ):
A = YolosImageProcessingTester(self )
@property
def __a ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
def __a ( self : Tuple ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , _lowercase )
A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowercase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _lowercase )
def __a ( self : Union[str, Any] ):
pass
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self : Dict ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self : Any ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self : Dict ):
# Initialize image_processings
A = self.image_processing_class(**self.image_processor_dict )
A = self.image_processing_class(do_resize=_lowercase , do_normalize=_lowercase , do_rescale=_lowercase )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A = image_processing_a.pad(_lowercase , return_tensors='pt' )
A = image_processing_a(_lowercase , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
@slow
def __a ( self : int ):
# prepare image and target
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A = json.loads(f.read() )
A = {'image_id': 39_769, 'annotations': target}
# encode them
A = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
A = image_processing(images=_lowercase , annotations=_lowercase , return_tensors='pt' )
# verify pixel values
A = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , _lowercase )
A = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowercase , atol=1e-4 ) )
# verify area
A = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowercase ) )
# verify boxes
A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowercase )
A = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowercase , atol=1e-3 ) )
# verify image_id
A = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowercase ) )
# verify is_crowd
A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowercase ) )
# verify class_labels
A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowercase ) )
# verify orig_size
A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowercase ) )
# verify size
A = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowercase ) )
@slow
def __a ( self : int ):
# prepare image, target and masks_path
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A = json.loads(f.read() )
A = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
A = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A = YolosImageProcessor(format='coco_panoptic' )
A = image_processing(images=_lowercase , annotations=_lowercase , masks_path=_lowercase , return_tensors='pt' )
# verify pixel values
A = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , _lowercase )
A = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowercase , atol=1e-4 ) )
# verify area
A = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowercase ) )
# verify boxes
A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowercase )
A = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowercase , atol=1e-3 ) )
# verify image_id
A = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowercase ) )
# verify is_crowd
A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowercase ) )
# verify class_labels
A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowercase ) )
# verify masks
A = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _lowercase )
# verify orig_size
A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowercase ) )
# verify size
A = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowercase ) )
| 690 |
"""simple docstring"""
from __future__ import annotations
def longest_subsequence ( array ) -> list[int]:  # This function is recursive
    """simple docstring"""
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
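# Added traced example: longest_subsequence([1, 3, 2, 4]) returns [1, 2, 4],
# one longest non-decreasing subsequence of the input.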
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase : str = logging.getLogger(__name__)
def simple_accuracy ( preds , labels ):
    """simple docstring"""
    return (preds == labels).mean()
@dataclass
class lowerCamelCase__ :
lowerCAmelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowerCamelCase__ :
lowerCAmelCase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
lowerCAmelCase = field(metadata={"""help""": """Should contain the data files for the task."""} )
lowerCAmelCase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main ( ):
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
try:
A = processors[data_args.task_name]()
A = processor.get_labels()
A = len(UpperCamelCase__ )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
A = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCamelCase__ ) -> Dict:
A = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCamelCase__ , p.label_ids )}
# Data collator
A = DataCollatorWithPadding(UpperCamelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
A = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(UpperCamelCase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCamelCase__ , UpperCamelCase__ )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCamelCase__ )
return results
def __snake_case ( UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
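# Illustrative launch (the script filename and dataset are assumptions; the
# flags map to the ModelArguments/DataTrainingArguments/TrainingArguments
# fields parsed in main() above):
# python run_multiple_choice.py \
#     --model_name_or_path bert-base-uncased --task_name swag \
#     --data_dir ./data/swag --output_dir ./swag_out \
#     --max_seq_length 80 --do_train --do_eval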
| 690 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
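    # Worked check (illustrative): for [1, 2, 3] and [4, 5, 6] both variants
    # compute sqrt(3**2 + 3**2 + 3**2) = sqrt(27) ≈ 5.196.
    assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-9
    assert abs(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-9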
| 690 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """simple docstring"""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """simple docstring"""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print("\n".join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
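# Programmatic usage sketch (illustrative; needs network access, and the title
# assumes ISBN 0140328726 still resolves to Roald Dahl's "Matilda"):
# book = summarize_book(get_openlibrary_data("isbn/0140328726"))
# print(book["Title"])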
| 690 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """simple docstring"""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}},
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights can exceed the 2 GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}},
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part (vae_encoder and vae_decoder are the same module)
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}},
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
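# Example invocation (illustrative; the checkpoint id and script filename are
# assumptions - any local or Hub `diffusers` Stable Diffusion checkpoint works):
# python convert_stable_diffusion_checkpoint_to_onnx.py \
#     --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14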
| 690 | 1 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    """simple docstring"""
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    """simple docstring"""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
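# The positional key-remapping used above, in miniature (illustrative): it
# relies on both state dicts enumerating parameters in the same order, so old
# names can be paired with new names without an explicit rename table:
# src = {"blocks.0.w": 1, "blocks.0.b": 2}   # hypothetical source keys
# dst_keys = ["down1.weight", "down1.bias"]  # hypothetical target keys
# remapped = {new: src[old] for old, new in zip(src, dst_keys)}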
| 690 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """simple docstring"""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):  # n is hard-coded to the length of the demo list below
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """simple docstring"""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
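# For reference, a sequential odd-even transposition sort (an illustrative
# single-process counterpart of the process-per-element version above):
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr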
def main():
    """simple docstring"""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 690 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase : str = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : int = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
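# Nothing changes for callers (illustrative): `from transformers import
# RagTokenizer, RagRetriever, RagSequenceForGeneration` still works; the
# _LazyModule indirection only defers the heavy torch/tf imports until the
# attribute is first accessed.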
| 690 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polynomial() -> None:
    """simple docstring"""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
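# What PolynomialFeatures(degree=4) does (illustrative): it expands a single
# feature x into [1, x, x**2, x**3, x**4], so the "polynomial" fit above is
# ordinary linear regression on the expanded columns. For instance:
# PolynomialFeatures(degree=2).fit_transform([[3]])  # -> [[1., 3., 9.]]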
| 690 | 1 |
def is_unique(input_str: str) -> bool:
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
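# Worked example: "abc" turns on bits 97, 98 and 99 of the bitmap and returns
# True; "aba" revisits bit 97 on the second 'a' and returns False.
# is_unique("abc") -> True ; is_unique("aba") -> False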
| 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
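# Usage sketch (illustrative; the checkpoint id is an assumption):
# from transformers import CLIPImageProcessor
# processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])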
| 690 | 0 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
__snake_case = get_tests_dir('''fixtures/dummy-config.json''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = 0
def snake_case_ ( self: str ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(A_,A_ )
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_,A_ )
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_,A_ )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = AutoConfig.for_model('roberta' )
self.assertIsInstance(A_,A_ )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__UpperCamelCase = os.path.join(A_,'fake-roberta' )
os.makedirs(A_,exist_ok=A_ )
with open(os.path.join(A_,'config.json' ),'w' ) as f:
f.write(json.dumps({} ) )
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
self.assertEqual(type(A_ ),A_ )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
try:
AutoConfig.register('custom',A_ )
# Wrong model type will raise an error
with self.assertRaises(A_ ):
AutoConfig.register('model',A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
AutoConfig.register('bert',A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A_ )
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_,A_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def snake_case_ ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
A_,'bert-base is not a local folder and is not a valid model identifier' ):
__UpperCamelCase = AutoConfig.from_pretrained('bert-base' )
def snake_case_ ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
A_,r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__UpperCamelCase = AutoConfig.from_pretrained(A_,revision='aaaaaa' )
def snake_case_ ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
A_,'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.',):
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
with self.assertRaises(A_ ):
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A_ ):
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model',trust_remote_code=A_ )
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model',trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__,'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A_ )
__UpperCamelCase = AutoConfig.from_pretrained(A_,trust_remote_code=A_ )
self.assertEqual(reloaded_config.__class__.__name__,'NewModelConfig' )
def snake_case_ ( self: str ):
'''simple docstring'''
class __lowerCamelCase (_a ):
_lowercase = """new-model"""
try:
AutoConfig.register('new-model',A_ )
# If remote code is not set, the default is to use local
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__,'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model',trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__,'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model',trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__,'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
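# The register pattern exercised above, in miniature (illustrative):
# AutoConfig.register("custom", CustomConfig)  # map a model_type to a config class
# cfg = AutoConfig.from_pretrained(saved_dir)  # now resolves to CustomConfig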
| 1 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
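# Context (illustrative): `accelerator.prepare` returns accelerate's
# AcceleratedOptimizer wrapper, so the test guards against wrapper state that
# the pickle protocol cannot round-trip.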
| 690 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]="[CLS]" , __lowerCAmelCase : List[Any]="[SEP]" , __lowerCAmelCase : List[str]="<unk>" , __lowerCAmelCase : str="[SEP]" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : int="[CLS]" , __lowerCAmelCase : List[str]="[MASK]" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : List[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_A = (
AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
else mask_token
)
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_A = do_lower_case
_A = remove_space
_A = keep_accents
_A = vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def snake_case_ ( self : List[Any] ) -> int:
return len(self.sp_model )
def snake_case_ ( self : Optional[int] ) -> List[str]:
_A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> str:
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : Dict , __lowerCAmelCase : Dict ) -> Tuple:
_A = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self : str , __lowerCAmelCase : str ) -> Any:
if self.remove_space:
_A = ''' '''.join(inputs.strip().split() )
else:
_A = inputs
_A = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
_A = unicodedata.normalize('''NFKD''' , __lowerCAmelCase )
_A = ''''''.join([c for c in outputs if not unicodedata.combining(__lowerCAmelCase )] )
if self.do_lower_case:
_A = outputs.lower()
return outputs
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> List[str]:
_A = self.preprocess_text(__lowerCAmelCase )
_A = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
_A = []
for piece in pieces:
if len(__lowerCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_A = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_A = cur_pieces[1:]
else:
_A = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCAmelCase )
else:
new_pieces.append(__lowerCAmelCase )
return new_pieces
def snake_case_ ( self : str , __lowerCAmelCase : Optional[int] ) -> Dict:
return self.sp_model.PieceToId(__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Optional[int] ) -> List[Any]:
return self.sp_model.IdToPiece(__lowerCAmelCase )
def snake_case_ ( self : Dict , __lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
_A = []
_A = ''''''
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
_A = True
_A = []
else:
current_sub_tokens.append(__lowerCAmelCase )
_A = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def snake_case_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
def snake_case_ ( self : str , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
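# Usage sketch (illustrative; per the vocab URLs above this is transformers'
# AlbertTokenizer):
# tok = AlbertTokenizer.from_pretrained("albert-base-v2")
# tok.tokenize("Hello world!")  # SentencePiece pieces such as ['▁hello', '▁world', '!']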
| 2 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """convbert"""
def __init__( self : Optional[int] , _lowercase : List[Any]=30_522 , _lowercase : List[str]=768 , _lowercase : Optional[Any]=12 , _lowercase : Any=12 , _lowercase : str=3_072 , _lowercase : List[str]="gelu" , _lowercase : Dict=0.1 , _lowercase : Dict=0.1 , _lowercase : Any=512 , _lowercase : List[str]=2 , _lowercase : Tuple=0.0_2 , _lowercase : List[Any]=1e-12 , _lowercase : List[str]=1 , _lowercase : Tuple=0 , _lowercase : Any=2 , _lowercase : Union[str, Any]=768 , _lowercase : str=2 , _lowercase : Any=9 , _lowercase : Union[str, Any]=1 , _lowercase : Dict=None , **_lowercase : Union[str, Any] , ):
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = embedding_size
A = head_ratio
A = conv_kernel_size
A = num_groups
A = classifier_dropout
class lowerCamelCase__ ( UpperCAmelCase_ ):
@property
def __a ( self : str ):
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 690 | 0 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
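# Usage sketch (illustrative): a test class opts in via the fixture and reads
# the prepared environment from `self.env`:
# @pytest.mark.usefixtures("sm_env")
# class PyTorchSageMakerTest(unittest.TestCase):  # hypothetical test class
#     framework = "pytorch"
#     def test_job_name(self):
#         assert self.env.base_job_name.startswith("pytorch")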
| 3 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690 | 0 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
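# Example invocation (illustrative paths; the script filename is an assumption):
# python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#     --xlm_checkpoint_path ./xlm_mlm_en_2048/model.pth \
#     --pytorch_dump_folder_path ./xlm_pytorch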
| 4 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that rounds toward zero for mixed signs
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
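# Worked example: ["2", "1", "+", "3", "*"] evaluates to (2 + 1) * 3 = 9;
# operands are pushed, and each operator pops b then a and pushes the result.
# evaluate_postfix(["2", "1", "+", "3", "*"]) -> 9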
| 690 | 0 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    '''simple docstring'''

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """simple docstring"""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        return super().__call__(*args, **kwargs)
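# Usage sketch (illustrative; the checkpoint id is an assumption):
# from transformers import pipeline
# extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
# feats = extractor("hello")  # nested lists shaped [1, seq_len, hidden_size]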
| 5 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
def __init__( self : Tuple , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : int=False , _lowercase : Tuple=True , _lowercase : Union[str, Any]=False , _lowercase : int="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<unk>" , _lowercase : Optional[int]="<sep>" , _lowercase : int="<pad>" , _lowercase : Dict="<cls>" , _lowercase : str="<mask>" , _lowercase : List[str]=["<eop>", "<eod>"] , **_lowercase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
A = 3
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
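# Usage sketch (hedged): loading from the Hub needs network access; the
# checkpoint name comes from the pretrained map above.
#
#     tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     ids = tokenizer("Hello world").input_ids  # sequence ends with <sep>, <cls>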
| 690 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer

TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif  those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_tokenizer(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 6 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """A* grid search; returns the path and the action map."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 690 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 7 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 690 | 0 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
__A : List[str] = []
__A : Union[str, Any] = pdb_str.split('\n' )
__A : Tuple = prot.remark
if remark is not None:
out_pdb_lines.append(f'REMARK {remark}' )
__A : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
__A : List[Any] = []
if prot.parents_chain_index is not None:
__A : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__snake_case ) , [] )
parent_dict[str(__snake_case )].append(__snake_case )
__A : Dict = max([int(__snake_case ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__A : Any = parent_dict.get(str(__snake_case ) , ['N/A'] )
parents_per_chain.append(__snake_case )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__A : Any = [['N/A']]
def make_parent_line(__snake_case : Sequence[str] ) -> str:
return f'PARENT {" ".join(__snake_case )}'
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__A : Any = 0
for i, l in enumerate(__snake_case ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__snake_case )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__snake_case ):
__A : Union[str, Any] = parents_per_chain[chain_counter]
else:
__A : List[Any] = ['N/A']
out_pdb_lines.append(make_parent_line(__snake_case ) )
return "\n".join(__snake_case )
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ["X"]

    def res_atoa(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")
__A : List[str] = residue_constants.atom_types
__A : List[str] = []
__A : Dict = prot.atom_mask
__A : Any = prot.aatype
__A : Tuple = prot.atom_positions
__A : Any = prot.residue_index.astype(np.intaa )
__A : List[Any] = prot.b_factors
__A : Any = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
__A : str = get_pdb_headers(__snake_case )
if len(__snake_case ) > 0:
pdb_lines.extend(__snake_case )
__A : int = aatype.shape[0]
__A : Tuple = 1
__A : int = 0
__A : Union[str, Any] = string.ascii_uppercase
__A : Optional[Any] = None
# Add all atom sites.
for i in range(__snake_case ):
__A : Union[str, Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__snake_case , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__A : Any = 'ATOM'
__A : Tuple = atom_name if len(__snake_case ) == 4 else f' {atom_name}'
__A : Tuple = ''
__A : Tuple = ''
__A : Any = 1.00
__A : Any = atom_name[0] # Protein supports only C, N, O, S, this works.
__A : int = ''
__A : Tuple = 'A'
if chain_index is not None:
__A : Tuple = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__A : Any = (
f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
f'{res_name_a:>3} {chain_tag:>1}'
f'{residue_index[i]:>4}{insertion_code:>1} '
f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
f'{occupancy:>6.2f}{b_factor:>6.2f} '
f'{element:>2}{charge:>2}'
)
pdb_lines.append(__snake_case )
atom_index += 1
__A : str = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__A : str = True
__A : List[Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
__A : Any = 'TER'
__A : List[Any] = (
f'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
)
pdb_lines.append(__snake_case )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__snake_case , __snake_case ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(__snake_case )
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray] = None, chain_index: Optional[np.ndarray] = None, remark: Optional[str] = None, parents: Optional[Sequence[str]] = None, parents_chain_index: Optional[Sequence[int]] = None) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 8 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 690 | 0 |
def solution() -> int:
    """
    Return the product a * b * c of the unique Pythagorean triplet with
    a + b + c == 1_000.
    """
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'{solution() = }')
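    # Worked check: the only Pythagorean triplet with a + b + c = 1000 is
    # (200, 375, 425), since 200**2 + 375**2 = 40000 + 140625 = 180625 = 425**2,
    # so the answer is 200 * 375 * 425 = 31875000.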
| 9 |
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """
    For each index, compute the length of the longest substring starting there
    that is also a prefix of the whole string.
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match at index i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
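    # Usage sketch: in "abracadabra" the pattern "abr" occurs twice
    # (at offsets 0 and 7), so find_pattern returns 2.
    print(find_pattern("abr", "abracadabra"))  # 2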
| 690 | 0 |
def binary_recursive(decimal: int) -> str:
    """Take a positive integer value and return its binary equivalent."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate a (possibly signed) integer string and return its binary form,
    prefixed with '0b' and the sign."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
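    # Usage sketch: main("7") -> "0b111", main("-11") -> "-0b1011".
    print(main("7"))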
| 10 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_text2img(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 690 | 0 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond (pyramid)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond: upper half followed by lower half."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R"| /\ | |- | |- |--| |\ /| |-")
    print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 11 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
    def test_jax_memory_efficient_attention(self):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 690 | 0 |
import numpy as np
class Cell:
    """A cell in the world, with a position, a parent, and the g, h, f costs used by A*."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Overrides equals so that cells compare by position.
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """The external world: an M*M grid."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the neighbours of cell."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search over `world` (a Gridworld) from `start` to `goal` (Cells)."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 12 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
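# Usage sketch (hedged): these entry points are meant to be consumed through
# torch.hub; the repo string and checkpoint are illustrative and need network
# access.
#
#     import torch
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")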
| 690 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
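# Usage sketch: with the _LazyModule registration above, importing a symbol is
# cheap until an attribute is actually accessed, e.g.:
#
#     from transformers.models.sew import SEWConfig  # torch-backed modules load lazily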
| 13 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 690 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a__ = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , **_a ) -> List[str]:
super().__init__(**_a )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(_a )
def __call__( self , _a , _a = None , **_a , ) -> List[str]:
if "text_queries" in kwargs:
_a : Optional[Any] = kwargs.pop('''text_queries''' )
if isinstance(_a , (str, Image.Image) ):
_a : Tuple = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
_a : str = image
_a : int = super().__call__(_a , **_a )
return results
def __lowercase ( self , **_a ) -> Union[str, Any]:
_a : Union[str, Any] = {}
if "threshold" in kwargs:
_a : Dict = kwargs['''threshold''']
if "top_k" in kwargs:
_a : Tuple = kwargs['''top_k''']
return {}, {}, postprocess_params
def __lowercase ( self , _a ) -> List[str]:
_a : int = load_image(inputs['''image'''] )
_a : Union[str, Any] = inputs['''candidate_labels''']
if isinstance(_a , _a ):
_a : Tuple = candidate_labels.split(''',''' )
_a : Dict = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(_a ):
_a : Union[str, Any] = self.tokenizer(_a , return_tensors=self.framework )
_a : Optional[Any] = self.image_processor(_a , return_tensors=self.framework )
yield {
"is_last": i == len(_a ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __lowercase ( self , _a ) -> str:
_a : Optional[int] = model_inputs.pop('''target_size''' )
_a : int = model_inputs.pop('''candidate_label''' )
_a : str = model_inputs.pop('''is_last''' )
_a : Union[str, Any] = self.model(**_a )
_a : List[str] = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def __lowercase ( self , _a , _a=0.1 , _a=None ) -> Any:
_a : str = []
for model_output in model_outputs:
_a : str = model_output['''candidate_label''']
_a : int = BaseModelOutput(_a )
_a : Tuple = self.image_processor.post_process_object_detection(
outputs=_a , threshold=_a , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
_a : Optional[Any] = outputs['''scores'''][index].item()
_a : List[str] = self._get_bounding_box(outputs['''boxes'''][index][0] )
_a : Dict = {'''score''': score, '''label''': label, '''box''': box}
results.append(_a )
        _a : str = sorted(_a , key=lambda x : x["score"] , reverse=_a )
if top_k:
_a : Any = results[:top_k]
return results
def __lowercase ( self , _a ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
_a , _a , _a , _a : Dict = box.int().tolist()
_a : int = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
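# Usage sketch (a minimal example; the OWL-ViT checkpoint is an assumption,
# but it is the model family this pipeline targets): score free-text labels
# against an image and get back score/label/box dicts sorted by confidence.
#
# from transformers import pipeline
# detector = pipeline('zero-shot-object-detection', model='google/owlvit-base-patch32')
# detector(
#     'http://images.cocodataset.org/val2017/000000039769.jpg',
#     candidate_labels=['cat', 'remote control'],
#     threshold=0.1,
# )
# # -> [{'score': ..., 'label': 'cat', 'box': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}, ...]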
| 14 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def __snake_case ( UpperCamelCase__ = "laptop" ) -> DataFrame:
"""simple docstring"""
A = f'https://www.amazon.in/laptop/s?k={product}'
A = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
A = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            A = item.h2.text
            A = 'https://www.amazon.in/' + item.h2.a['href']
A = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
A = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
A = 'Not available'
try:
A = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
A = ''
try:
A = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
A = float('nan' )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = ' '
A = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
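# The discount column above is plain percentage arithmetic; here is the same
# formula as a stand-alone sketch (discount_percent is an illustrative helper,
# not part of the original scraper): discount% = (MRP - price) / MRP * 100.
def discount_percent(mrp: float, price: float) -> float:
    return (mrp - price) / mrp * 100


if __name__ == "__main__":
    # Worked example: an MRP of 1000 discounted to 750 is a 25% discount.
    assert round(discount_percent(1000.0, 750.0), 2) == 25.0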
| 690 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
lowercase__ , lowercase__ = get_aligned_output_features_output_indices(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ["""c"""] )
self.assertEqual(_UpperCAmelCase , [2] )
# Out indices set to match out features
lowercase__ , lowercase__ = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ["""a""", """c"""] )
self.assertEqual(_UpperCAmelCase , [0, 2] )
# Out features set to match out indices
lowercase__ , lowercase__ = get_aligned_output_features_output_indices(_UpperCAmelCase , [0, 2] , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ["""a""", """c"""] )
self.assertEqual(_UpperCAmelCase , [0, 2] )
# Out features selected from negative indices
lowercase__ , lowercase__ = get_aligned_output_features_output_indices(_UpperCAmelCase , [-3, -1] , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ["""a""", """c"""] )
self.assertEqual(_UpperCAmelCase , [-3, -1] )
def lowerCamelCase__ (self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCAmelCase )
# Out features must be a list
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(_UpperCAmelCase , 0 , ["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(_UpperCAmelCase , (0, 1) , ["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] )
def lowerCamelCase__ (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = BackboneMixin()
lowercase__ = ["""a""", """b""", """c"""]
lowercase__ = ["""a""", """c"""]
lowercase__ = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowercase__ = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowercase__ = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 15 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : int=7 , _lowercase : List[str]=3 , _lowercase : Tuple=18 , _lowercase : Dict=30 , _lowercase : Any=400 , _lowercase : int=True , _lowercase : List[Any]=None , _lowercase : Tuple=True , _lowercase : List[Any]=False , _lowercase : str=True , _lowercase : List[str]=True , _lowercase : int=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , ):
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size if size is not None else {'height': 18, 'width': 20}
A = do_thumbnail
A = do_align_axis
A = do_pad
A = do_normalize
A = image_mean
A = image_std
def __a ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
A = DonutImageProcessingTester(self )
@property
def __a ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowercase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowercase , 'do_pad' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
def __a ( self : int ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
A = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __a ( self : Any ):
pass
@is_flaky()
def __a ( self : int ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 690 | 0 |
def __a ( A__ : int , A__ : int , A__ : list[list[int]] ):
def update_area_of_max_square(A__ : int , A__ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
SCREAMING_SNAKE_CASE = update_area_of_max_square(A__ , col + 1 )
SCREAMING_SNAKE_CASE = update_area_of_max_square(row + 1 , col + 1 )
SCREAMING_SNAKE_CASE = update_area_of_max_square(row + 1 , A__ )
if mat[row][col]:
SCREAMING_SNAKE_CASE = 1 + min([right, diagonal, down] )
SCREAMING_SNAKE_CASE = max(largest_square_area[0] , A__ )
return sub_problem_sol
else:
return 0
SCREAMING_SNAKE_CASE = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __a ( A__ : int , A__ : int , A__ : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
A__ : int , A__ : int , A__ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
SCREAMING_SNAKE_CASE = update_area_of_max_square_using_dp_array(A__ , col + 1 , A__ )
SCREAMING_SNAKE_CASE = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , A__ )
SCREAMING_SNAKE_CASE = update_area_of_max_square_using_dp_array(row + 1 , A__ , A__ )
if mat[row][col]:
SCREAMING_SNAKE_CASE = 1 + min([right, diagonal, down] )
SCREAMING_SNAKE_CASE = max(largest_square_area[0] , A__ )
SCREAMING_SNAKE_CASE = sub_problem_sol
return sub_problem_sol
else:
return 0
SCREAMING_SNAKE_CASE = [0]
SCREAMING_SNAKE_CASE = [[-1] * cols for _ in range(A__ )]
update_area_of_max_square_using_dp_array(0 , 0 , A__ )
return largest_square_area[0]
def __a ( A__ : int , A__ : int , A__ : list[list[int]] ):
SCREAMING_SNAKE_CASE = [[0] * (cols + 1) for _ in range(rows + 1 )]
SCREAMING_SNAKE_CASE = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
SCREAMING_SNAKE_CASE = dp_array[row][col + 1]
SCREAMING_SNAKE_CASE = dp_array[row + 1][col + 1]
SCREAMING_SNAKE_CASE = dp_array[row + 1][col]
if mat[row][col] == 1:
SCREAMING_SNAKE_CASE = 1 + min(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = max(dp_array[row][col] , A__ )
else:
SCREAMING_SNAKE_CASE = 0
return largest_square_area
def __a ( A__ : int , A__ : int , A__ : list[list[int]] ):
SCREAMING_SNAKE_CASE = [0] * (cols + 1)
SCREAMING_SNAKE_CASE = [0] * (cols + 1)
SCREAMING_SNAKE_CASE = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
SCREAMING_SNAKE_CASE = current_row[col + 1]
SCREAMING_SNAKE_CASE = next_row[col + 1]
SCREAMING_SNAKE_CASE = next_row[col]
if mat[row][col] == 1:
SCREAMING_SNAKE_CASE = 1 + min(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = max(current_row[col] , A__ )
else:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 16 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : int=2 , _lowercase : Optional[Any]=3 , _lowercase : Any=64 , _lowercase : Tuple=None ):
A = np.random.default_rng(_lowercase )
A = length
        A = rng.normal(size=(length,) ).astype(np.float32 )
        A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self : str ):
return self.length
def __getitem__( self : List[str] , _lowercase : int ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[int] , _lowercase : Any=0 , _lowercase : List[Any]=0 , _lowercase : Optional[int]=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = True
def __a ( self : Optional[Any] , _lowercase : str=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a[0] + self.b[0]
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[Any] , _lowercase : Any=0 , _lowercase : List[str]=0 , _lowercase : str=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = True
def __a ( self : int , _lowercase : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a + self.b
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ = 16 ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
A = AutoTokenizer.from_pretrained('bert-base-cased' )
A = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
A = load_dataset('csv' , data_files=UpperCamelCase__ )
A = datasets['train'].unique('label' )
A = {v: i for i, v in enumerate(UpperCamelCase__ )}
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
if "label" in examples:
A = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=2 )
A = DataLoader(tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
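# Usage sketch (commented; assumes the original function name was
# `get_dataloaders` and that the MRPC csv fixtures above exist on disk). The
# loaders are meant to be wrapped by an `accelerate.Accelerator`, whose
# `distributed_type` drives the TPU max-length padding branch in `collate_fn`.
#
# from accelerate import Accelerator
# accelerator = Accelerator()
# train_dataloader, eval_dataloader = get_dataloaders(accelerator)
# train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)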
| 690 | 0 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
UpperCAmelCase_ : Optional[Any] = 1.0_5457_1817e-34 # unit of ℏ : J * s
UpperCAmelCase_ : Union[str, Any] = 3e8 # unit of c : m * s^-1
def __SCREAMING_SNAKE_CASE ( a__ : float ,a__ : float ,a__ : float ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
__A : List[Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
__A : Tuple = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
__A : Optional[int] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
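# Example calls (commented sketch; assumes the original signature was
# casimir_force(force, area, distance), with exactly one argument passed as 0
# so the function solves for it):
# casimir_force(force=0, area=4e-4, distance=1e-6)    # -> {'force': ...}
# casimir_force(force=2.7e-3, area=0, distance=1e-6)  # -> {'area': ...}
# casimir_force(force=2.7e-3, area=4e-4, distance=0)  # -> {'distance': ...}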
| 17 |
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ ) -> list[int]: # This function is recursive
"""simple docstring"""
A = len(UpperCamelCase__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
A = array[0]
A = False
A = 1
A = []
while not is_found and i < array_length:
if array[i] < pivot:
A = True
A = [element for element in array[i:] if element >= array[i]]
A = longest_subsequence(UpperCamelCase__ )
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
A = temp_array
else:
i += 1
A = [element for element in array[1:] if element >= pivot]
A = [pivot, *longest_subsequence(UpperCamelCase__ )]
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
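# Usage sketch (commented; the recursive call sites above use the name
# `longest_subsequence`): the classic example
# longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
# returns one longest non-decreasing subsequence, e.g. [10, 22, 33, 41, 60, 80].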
| 690 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : float | Decimal , SCREAMING_SNAKE_CASE_ : float = 10**-10 ):
'''simple docstring'''
_lowerCAmelCase = a
while True:
_lowerCAmelCase = Decimal(SCREAMING_SNAKE_CASE_ ) - (
Decimal(eval(SCREAMING_SNAKE_CASE_ ) ) / Decimal(eval(str(diff(SCREAMING_SNAKE_CASE_ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(SCREAMING_SNAKE_CASE_ ) ) < precision: # noqa: S307
return float(SCREAMING_SNAKE_CASE_ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find the value of e (root of log(x) - 1 = 0)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
| 18 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(UpperCamelCase__ ) - np.asarray(UpperCamelCase__ )) ** 2 ) )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(UpperCamelCase__ , UpperCamelCase__ ) ) ** (1 / 2)
if __name__ == "__main__":
def __snake_case ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
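# Worked example for both implementations: with u = [1, 2, 3] and v = [4, 5, 6]
# the distance is sqrt((4-1)**2 + (5-2)**2 + (6-3)**2) = sqrt(27) ≈ 5.196152.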
| 690 | 0 |
"""simple docstring"""
import random
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = a[left_index]
_UpperCamelCase = left_index + 1
for j in range(left_index + 1, __snake_case ):
if a[j] < pivot:
_UpperCamelCase , _UpperCamelCase = a[i], a[j]
i += 1
_UpperCamelCase , _UpperCamelCase = a[i - 1], a[left_index]
return i - 1
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Tuple:
"""simple docstring"""
if left < right:
_UpperCamelCase = random.randint(__snake_case, right - 1 )
_UpperCamelCase , _UpperCamelCase = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_UpperCamelCase = partition(__snake_case, __snake_case, __snake_case )
quick_sort_random(
__snake_case, __snake_case, __snake_case ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__snake_case, pivot_index + 1, __snake_case ) # recursive quicksort to the right of the pivot point
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase = input('''Enter numbers separated by a comma:\n''' ).strip()
    _UpperCamelCase = [int(item ) for item in user_input.split(''',''' )]
quick_sort_random(__snake_case, 0, len(__snake_case ) )
print(__snake_case )
if __name__ == "__main__":
main()
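# Non-interactive usage sketch (commented; follows the recursive call sites'
# name `quick_sort_random`; the right bound is exclusive, hence len(data)):
# data = [5, 3, 8, 1, 9, 2]
# quick_sort_random(data, 0, len(data))  # sorts in place
# assert data == [1, 2, 3, 5, 8, 9]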
| 19 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCamelCase : List[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False , ) -> Any:
"""simple docstring"""
output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
else:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
@torch.no_grad()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> str:
"""simple docstring"""
    A = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
A = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
A = 'cpu'
A = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=UpperCamelCase__ ).to(UpperCamelCase__ )
A = Path(UpperCamelCase__ )
# TEXT ENCODER
A = pipeline.text_encoder.config.max_position_embeddings
A = pipeline.text_encoder.config.hidden_size
A = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
A = pipeline.unet.config.in_channels
A = pipeline.unet.config.sample_size
A = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
A = str(unet_path.absolute().as_posix() )
A = os.path.dirname(UpperCamelCase__ )
A = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location='weights.pb' , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
# VAE ENCODER
A = pipeline.vae
A = vae_encoder.config.in_channels
A = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
A = lambda UpperCamelCase__ , UpperCamelCase__ : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
# VAE DECODER
A = pipeline.vae
A = vae_decoder.config.latent_channels
A = vae_decoder.config.out_channels
# forward only through the decoder part
A = vae_encoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
A = pipeline.safety_checker
A = safety_checker.config.vision_config.num_channels
A = safety_checker.config.vision_config.image_size
A = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
A = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
A = pipeline.feature_extractor
else:
A = None
A = None
A = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print('ONNX pipeline saved to' , UpperCamelCase__ )
del pipeline
del onnx_pipeline
A = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : str = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
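# Example invocation (a sketch; both the script filename and the checkpoint
# name are assumptions):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./stable_diffusion_onnx \
#       --opset 14 --fp16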
| 690 | 0 |
from math import factorial
def _lowercase( __a : int , __a : int , __a : float ):
if successes > trials:
        raise ValueError('successes must be lower than or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__a , __a ) or not isinstance(__a , __a ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
        raise ValueError('prob has to be in the range (0, 1)' )
a__ =(prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
a__ =float(factorial(__a ) )
coefficient /= factorial(__a ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
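# Worked check of the value printed above:
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375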
| 20 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase : List[str] = Lock()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A = min(UpperCamelCase__ , UpperCamelCase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A = max(UpperCamelCase__ , UpperCamelCase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCamelCase__ )
def __snake_case ( UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = []
A = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
A = temp_rs
A = temp_rr
for i in range(1 , len(UpperCamelCase__ ) - 1 ):
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
A = temp_rs
A = temp_rr
process_array_.append(
Process(
target=UpperCamelCase__ , args=(
len(UpperCamelCase__ ) - 1,
arr[len(UpperCamelCase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCamelCase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCamelCase__ ) ):
A = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __snake_case ( ) -> Optional[Any]:
"""simple docstring"""
A = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*UpperCamelCase__ )
A = odd_even_transposition(UpperCamelCase__ )
print('Sorted List\n' )
print(*UpperCamelCase__ )
if __name__ == "__main__":
main()
| 690 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Tuple =SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , )
__magic_name__ : Tuple =DetaConfig(
backbone_config=lowerCamelCase , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=lowerCamelCase , with_box_refine=lowerCamelCase , two_stage=lowerCamelCase , )
# set labels
__magic_name__ : Optional[Any] ="""huggingface/label-files"""
if "o365" in model_name:
__magic_name__ : int =366
__magic_name__ : Any ="""object365-id2label.json"""
else:
__magic_name__ : Optional[int] =91
__magic_name__ : Any ="""coco-detection-id2label.json"""
__magic_name__ : Union[str, Any] =num_labels
__magic_name__ : Any =json.load(open(cached_download(hf_hub_url(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
    __magic_name__ : Union[str, Any] ={int(k ): v for k, v in idalabel.items()}
__magic_name__ : Union[str, Any] =idalabel
__magic_name__ : Tuple ={v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =[]
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : str =dct.pop(lowerCamelCase )
__magic_name__ : List[str] =val
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Tuple =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__magic_name__ : Optional[int] =num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__magic_name__ : List[str] =state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" )
__magic_name__ : Any =state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : List[Any] =in_proj_weight[:dim, :]
__magic_name__ : Tuple =in_proj_bias[: dim]
__magic_name__ : str =in_proj_weight[
dim : dim * 2, :
]
__magic_name__ : Optional[int] =in_proj_bias[
dim : dim * 2
]
__magic_name__ : Union[str, Any] =in_proj_weight[
-dim :, :
]
__magic_name__ : Tuple =in_proj_bias[-dim :]
# fmt: on
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
# transformer decoder self-attention layers
__magic_name__ : Union[str, Any] =config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__magic_name__ : Dict =state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
__magic_name__ : int =state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : int =in_proj_weight[:hidden_size, :]
__magic_name__ : Tuple =in_proj_bias[:hidden_size]
__magic_name__ : int =in_proj_weight[
hidden_size : hidden_size * 2, :
]
__magic_name__ : List[Any] =in_proj_bias[hidden_size : hidden_size * 2]
__magic_name__ : str =in_proj_weight[-hidden_size:, :]
__magic_name__ : str =in_proj_bias[-hidden_size:]
def lowerCAmelCase_ ( ):
__magic_name__ : int ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
__magic_name__ : str =Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Dict =get_deta_config(lowerCamelCase )
# load original state dict
if model_name == "deta-swin-large":
__magic_name__ : List[str] =hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" )
elif model_name == "deta-swin-large-o365":
__magic_name__ : Dict =hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" )
else:
raise ValueError(F"Model name {model_name} not supported" )
__magic_name__ : int =torch.load(lowerCamelCase , map_location="""cpu""" )["""model"""]
# original state dict
for name, param in state_dict.items():
print(lowerCamelCase , param.shape )
# rename keys
__magic_name__ : Any =create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_swin_q_k_v(lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__magic_name__ : Any =state_dict.pop(lowerCamelCase )
__magic_name__ : Any =val
if "input_proj" in key:
__magic_name__ : List[str] =state_dict.pop(lowerCamelCase )
__magic_name__ : Optional[Any] =val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__magic_name__ : int =state_dict.pop(lowerCamelCase )
__magic_name__ : Any =val
# finally, create HuggingFace model and load state dict
__magic_name__ : str =DetaForObjectDetection(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
__magic_name__ : Optional[Any] ="""cuda""" if torch.cuda.is_available() else """cpu"""
model.to(lowerCamelCase )
# load image processor
__magic_name__ : Any =DetaImageProcessor(format="""coco_detection""" )
# verify our conversion on image
__magic_name__ : Tuple =prepare_img()
__magic_name__ : Optional[Any] =processor(images=lowerCamelCase , return_tensors="""pt""" )
__magic_name__ : int =encoding["""pixel_values"""]
__magic_name__ : List[str] =model(pixel_values.to(lowerCamelCase ) )
# verify logits
print("""Logits:""" , outputs.logits[0, :3, :3] )
print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__magic_name__ : str =torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
__magic_name__ : Dict =torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
__magic_name__ : List[Any] =torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
__magic_name__ : Optional[Any] =torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(lowerCamelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(lowerCamelCase ) , atol=1E-4 )
print("""Everything ok!""" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
# Push to hub
if push_to_hub:
print("""Pushing model and processor to hub...""" )
model.push_to_hub(F"jozhang97/{model_name}" )
processor.push_to_hub(F"jozhang97/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCAmelCase_ : str = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
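# Example invocation (a sketch; the script filename is an assumption):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub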
| 21 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
UpperCamelCase : int = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
UpperCamelCase : List[Any] = dataset.iloc[:, 1:2].values
UpperCamelCase : Any = dataset.iloc[:, 2].values
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = train_test_split(X, y, test_size=0.2, random_state=0)
UpperCamelCase : List[str] = PolynomialFeatures(degree=4)
UpperCamelCase : Optional[int] = poly_reg.fit_transform(X)
UpperCamelCase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
def __snake_case ( ) -> Optional[int]:
"""simple docstring"""
plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color='red' )
plt.plot(UpperCamelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase__ ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
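# Note on the call above (a sketch of the more explicit form): once
# PolynomialFeatures has been fitted on X, transform() alone is the idiomatic
# call for new queries; fit_transform on a single point happens to yield the
# same degree-4 features here, but transform keeps the train/inference split
# explicit.
# pol_reg.predict(poly_reg.transform([[5.5]]))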
| 690 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : str = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class A ( _a ):
lowercase_ = 'dpr'
def __init__( self : Any , lowerCAmelCase_ : Any=3_05_22 , lowerCAmelCase_ : Tuple=7_68 , lowerCAmelCase_ : Union[str, Any]=12 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[Any]=5_12 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Optional[Any]=0.0_2 , lowerCAmelCase_ : Any=1e-12 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Optional[Any]="absolute" , lowerCAmelCase_ : int = 0 , **lowerCAmelCase_ : Any , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = projection_dim
_a = position_embedding_type
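# Usage sketch (commented; `DPRConfig` is the name transformers exports for
# this configuration class): build a default context-encoder config, or load
# one of the checkpoints from the archive map above.
#
# from transformers import DPRConfig
# config = DPRConfig()
# config = DPRConfig.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base')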
| 22 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
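# Note on the preprocess flow above: with every flag enabled, each image passes through
# convert_to_rgb -> resize (to the `shortest_edge` size) -> center_crop (to `crop_size`)
# -> rescale (by 1/255) -> normalize (OpenAI CLIP mean/std) before being packed into a
# BatchFeature, so `size` and `crop_size` deliberately control two separate stages.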
| 690 | 0 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img, pt1, pt2, rows, cols):
    # three point correspondences fully determine a 2x3 affine matrix
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list (point-set pairings reconstructed; any
    # two of the four sets give a valid affine warp)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
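# Background (added note): cv2.getAffineTransform solves for the unique 2x3 matrix M
# that maps three source points onto three destination points, and cv2.warpAffine then
# applies [x', y']^T = M @ [x, y, 1]^T to every pixel. Sanity-check sketch:
#
#   src = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
#   m = cv2.getAffineTransform(src, src)
#   # m is [[1, 0, 0], [0, 1, 0]] up to floating point, i.e. the identity mapping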
| 23 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
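# Context for the test above: accelerator.prepare wraps the raw torch optimizer in
# accelerate's AcceleratedOptimizer, and round-tripping it through pickle.dumps/loads
# is the minimal check that the wrapper stays serializable (e.g. for checkpointing).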
| 690 | 0 |
'''simple docstring'''
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
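# Note on determinism in the fast test above: generation runs on "cpu" with a seeded
# torch.Generator precisely so the nine-value expected_slice stays reproducible; the
# same seed on an accelerator device would sample different values.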
| 24 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """convbert"""
def __init__( self : Optional[int] , _lowercase : List[Any]=30_522 , _lowercase : List[str]=768 , _lowercase : Optional[Any]=12 , _lowercase : Any=12 , _lowercase : str=3_072 , _lowercase : List[str]="gelu" , _lowercase : Dict=0.1 , _lowercase : Dict=0.1 , _lowercase : Any=512 , _lowercase : List[str]=2 , _lowercase : Tuple=0.0_2 , _lowercase : List[Any]=1e-12 , _lowercase : List[str]=1 , _lowercase : Tuple=0 , _lowercase : Any=2 , _lowercase : Union[str, Any]=768 , _lowercase : str=2 , _lowercase : Any=9 , _lowercase : Union[str, Any]=1 , _lowercase : Dict=None , **_lowercase : Union[str, Any] , ):
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = embedding_size
A = head_ratio
A = conv_kernel_size
A = num_groups
A = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 690 | 0 |
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 25 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (Reverse Polish) integer expression with a stack."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
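# Worked example (added note): evaluate_postfix(["2", "1", "+", "3", "*"]) pushes 2
# and 1, replaces them with 2 + 1 = 3 on "+", pushes 3, then replaces the pair with
# 3 * 3 = 9 on "*", so the function returns 9 with exactly one value left to pop.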
| 690 | 0 |
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
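# Hypothetical usage sketch (class and dict contents invented for illustration):
#
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#       feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}
#
# The mixin then exercises JSON round-trips plus save_pretrained/from_pretrained
# against whatever extractor class the subclass plugs in.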
| 27 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase : Any = None
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Optional[int] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCamelCase : str = "▁"
# Segments (not really needed)
UpperCamelCase : str = 0
UpperCamelCase : int = 1
UpperCamelCase : List[Any] = 2
UpperCamelCase : Union[str, Any] = 3
UpperCamelCase : Optional[Any] = 4
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = """left"""
lowerCAmelCase = XLNetTokenizer
def __init__( self : Tuple , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : int=False , _lowercase : Tuple=True , _lowercase : Union[str, Any]=False , _lowercase : int="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<unk>" , _lowercase : Optional[int]="<sep>" , _lowercase : int="<pad>" , _lowercase : Dict="<cls>" , _lowercase : str="<mask>" , _lowercase : List[str]=["<eop>", "<eod>"] , **_lowercase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
A = 3
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 690 | 0 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"Salesforce/codegen-350M-mono": 2_0_4_8,
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[str] = ['''input_ids''', '''attention_mask''']
A : Optional[int] = CodeGenTokenizer
def __init__( self, A=None, A=None, A=None, A="<|endoftext|>", A="<|endoftext|>", A="<|endoftext|>", A=False, **A, ):
'''simple docstring'''
super().__init__(
A, A, tokenizer_file=A, unk_token=A, bos_token=A, eos_token=A, add_prefix_space=A, **A, )
if kwargs.pop('add_bos_token', A ):
SCREAMING_SNAKE_CASE : str = kwargs.pop('name_or_path', '' )
raise ValueError(
'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.' )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', A ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Optional[int] = getattr(A, pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE : List[Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Optional[Any] = pre_tok_class(**A )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
def UpperCamelCase_ ( self, *A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('is_split_into_words', A )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A, **A )
def UpperCamelCase_ ( self, *A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('is_split_into_words', A )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A, **A )
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self._tokenizer.model.save(A, name=A )
return tuple(A )
def UpperCamelCase_ ( self, A, A = False, A = None, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().decode(
token_ids=A, skip_special_tokens=A, clean_up_tokenization_spaces=A, **A, )
if truncate_before_pattern is not None and len(A ) > 0:
SCREAMING_SNAKE_CASE : List[str] = self.truncate(A, A )
return decoded_text
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
def find_re(A, A, A ):
SCREAMING_SNAKE_CASE : Tuple = pattern.search(A, A )
return m.start() if m else -1
SCREAMING_SNAKE_CASE : List[Any] = [re.compile(A, re.MULTILINE ) for pattern in truncate_before_pattern]
SCREAMING_SNAKE_CASE : str = list(re.finditer('^print', A, re.MULTILINE ) )
if len(A ) > 1:
SCREAMING_SNAKE_CASE : Optional[Any] = completion[: prints[1].start()]
SCREAMING_SNAKE_CASE : str = list(re.finditer('^def', A, re.MULTILINE ) )
if len(A ) > 1:
SCREAMING_SNAKE_CASE : str = completion[: defs[1].start()]
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = [
pos for pos in [find_re(A, A, A ) for terminal in terminals] if pos != -1
]
if len(A ) > 0:
return completion[: min(A )]
else:
return completion
| 28 |
"""simple docstring"""
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Greedy grid search guided by a heuristic map; returns (path, action)."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 690 | 0 |
"""simple docstring"""
from math import ceil
def solution(n: int = 1_001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral grid."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 29 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {"vocab_file": "sentencepiece.model"}
UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCamelCase : Union[str, Any] = {
"google/rembert": 256,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def __a ( self : Tuple ):
return len(self.sp_model )
def __a ( self : List[str] ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : List[str] , _lowercase : int ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ):
A = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def __a ( self : Dict , _lowercase : Tuple ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : str , _lowercase : Optional[int] ):
return self.sp_model.IdToPiece(_lowercase )
def __a ( self : Optional[int] , _lowercase : Optional[int] ):
A = self.sp_model.decode_pieces(_lowercase )
return out_string
def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 690 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1_024, 2_048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 30 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 690 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : int=13 , _lowerCAmelCase : str=7 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Dict=99 , _lowerCAmelCase : Tuple=16 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=4 , _lowerCAmelCase : int=4 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=32 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[int]=1 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : Any=0.02 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = pad_token_id
SCREAMING_SNAKE_CASE_ = bos_token_id
SCREAMING_SNAKE_CASE_ = initializer_range
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
SCREAMING_SNAKE_CASE_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
SCREAMING_SNAKE_CASE_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = model_class_name(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict['input_ids'] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
SCREAMING_SNAKE_CASE_ = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
SCREAMING_SNAKE_CASE_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model.decode(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = model_class_name(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict['input_ids'] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
SCREAMING_SNAKE_CASE_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
SCREAMING_SNAKE_CASE_ = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = 99
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
SCREAMING_SNAKE_CASE_ = input_ids.shape[0]
SCREAMING_SNAKE_CASE_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_config_and_data()
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotForConditionalGeneration(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = lm_model(input_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotForConditionalGeneration(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
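        # shift_tokens_right prepends the decoder start token (id 2) and drops
        # each row's final token, so exactly one pad token disappears here.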
SCREAMING_SNAKE_CASE_ = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
SCREAMING_SNAKE_CASE_ = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = True
lowercase_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotModelTester(self )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : str ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE_ = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
SCREAMING_SNAKE_CASE_ = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE_ = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE_ = np.ones((1, 1) ) * model.config.eos_token_id
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
SCREAMING_SNAKE_CASE_ = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
SCREAMING_SNAKE_CASE_ = ['Sam']
SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCAmelCase , return_tensors='jax' )
SCREAMING_SNAKE_CASE_ = model.generate(**_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'Sam is a great name. It means "sun" in Gaelic.'
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase , **_lowerCAmelCase )
assert generated_txt[0].strip() == tgt_text | 31 |
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array: z_result[i] is the length of the longest substring
    starting at i that is also a prefix of input_str (index 0 is left at 0)."""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match starting at index i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
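

# A minimal hand-checked sanity example (expected values computed by hand
# from the definitions above, not taken from any external source):
assert z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1]
assert find_pattern("aba", "abacaba") == 2  # matches at indices 0 and 4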
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
UpperCAmelCase_ = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
UpperCAmelCase_ = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
UpperCAmelCase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase_ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase_ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __UpperCamelCase ( A__ ):
__A : Optional[int] = VOCAB_FILES_NAMES
__A : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__A : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Optional[int] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( A__ ):
__A : Dict = VOCAB_FILES_NAMES
__A : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__A : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
UpperCAmelCase_ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase_ = r"\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `'tf'`: Return TensorFlow `tf.constant` objects.\n            - `'pt'`: Return PyTorch `torch.Tensor` objects.\n            - `'np'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer's default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    "
@add_start_docstrings(A__ )
class __UpperCamelCase :
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
if titles is None and texts is None:
return super().__call__(
_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , )
elif titles is None or texts is None:
_UpperCAmelCase = titles if texts is None else texts
return super().__call__(
_UpperCamelCase , _UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = titles if not isinstance(_UpperCamelCase , _UpperCamelCase ) else [titles]
_UpperCAmelCase = texts if not isinstance(_UpperCamelCase , _UpperCamelCase ) else [texts]
_UpperCAmelCase = len(_UpperCamelCase )
_UpperCAmelCase = questions if not isinstance(_UpperCamelCase , _UpperCamelCase ) else [questions] * n_passages
if len(_UpperCamelCase ) != len(_UpperCamelCase ):
raise ValueError(
                f'''There should be as many titles as texts but got {len(_UpperCamelCase )} titles and {len(_UpperCamelCase )} texts.''' )
_UpperCAmelCase = super().__call__(_UpperCamelCase , _UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase )['''input_ids''']
_UpperCAmelCase = super().__call__(_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase )['''input_ids''']
_UpperCAmelCase = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_UpperCamelCase , _UpperCamelCase )
]
}
if return_attention_mask is not False:
_UpperCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_UpperCAmelCase = attention_mask
return self.pad(_UpperCamelCase , padding=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors=_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 16 , _UpperCamelCase = 64 , _UpperCamelCase = 4 , ):
_UpperCAmelCase = reader_input['''input_ids''']
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reader_output[:3]
_UpperCAmelCase = len(_UpperCamelCase )
_UpperCAmelCase = sorted(range(_UpperCamelCase ) , reverse=_UpperCamelCase , key=relevance_logits.__getitem__ )
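        # Passages are visited in decreasing relevance order; spans are pulled
        # from each passage until `num_spans` predictions are collected.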
_UpperCAmelCase = []
for doc_id in sorted_docs:
_UpperCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCAmelCase = sequence_ids.index(self.pad_token_id )
else:
_UpperCAmelCase = len(_UpperCamelCase )
_UpperCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCamelCase , top_spans=_UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCamelCase , start_index=_UpperCamelCase , end_index=_UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
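        # Score every candidate span no longer than `max_answer_length` by
        # start_logit + end_logit, then greedily keep the highest-scoring
        # non-overlapping spans.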
_UpperCAmelCase = []
for start_index, start_score in enumerate(_UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _UpperCAmelCase = sorted(_UpperCamelCase , key=lambda x : x[1] , reverse=_UpperCamelCase )
_UpperCAmelCase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
_UpperCAmelCase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A__ )
class __UpperCamelCase ( A__ , A__ ):
__A : Dict = VOCAB_FILES_NAMES
__A : Dict = READER_PRETRAINED_VOCAB_FILES_MAP
__A : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Optional[Any] = READER_PRETRAINED_INIT_CONFIGURATION
__A : Tuple = ["""input_ids""", """attention_mask"""] | 32 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 690 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = """▁"""
lowerCamelCase__ : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCamelCase__ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
lowerCamelCase__ : List[Any] = {
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
lowerCamelCase__ : List[str] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Optional[Any] = ['input_ids', 'attention_mask']
__lowercase : List[int] = []
__lowercase : List[int] = []
def __init__( self:List[Any] , _a:int , _a:Optional[int]="<s>" , _a:Any="</s>" , _a:int="</s>" , _a:str="<s>" , _a:Tuple="<unk>" , _a:Any="<pad>" , _a:str="<mask>" , _a:str=None , _a:Union[str, Any]=None , _a:List[Any]=None , _a:Optional[Dict[str, Any]] = None , _a:Any=None , _a:str=False , **_a:Tuple , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case__ = legacy_behaviour
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , tokenizer_file=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_a , **_a , )
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
snake_case__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
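        # Illustration (a sketch based on the table above): spm gives 'an' id 3,
        # and adding fairseq_offset (1) yields its fairseq id 4; the four
        # special tokens bypass spm via fairseq_tokens_to_ids.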
snake_case__ = 1
snake_case__ = len(self.sp_model )
snake_case__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_a )
}
snake_case__ = {v: k for k, v in self.lang_code_to_id.items()}
snake_case__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
snake_case__ = src_lang if src_lang is not None else '''eng_Latn'''
snake_case__ = self.lang_code_to_id[self._src_lang]
snake_case__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self:Optional[Any] ):
snake_case__ = self.__dict__.copy()
snake_case__ = None
snake_case__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self:int , _a:List[Any] ):
snake_case__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def SCREAMING_SNAKE_CASE__ ( self:str ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:str ):
snake_case__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None , _a:bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
snake_case__ = [1] * len(self.prefix_tokens )
snake_case__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:List[int] , _a:Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:str , _a:Optional[str] , _a:Optional[str] , **_a:List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
snake_case__ = src_lang
snake_case__ = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
snake_case__ = self.convert_tokens_to_ids(_a )
snake_case__ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:str ):
return self.sp_model.encode(_a , out_type=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:Any ):
snake_case__ = ''''''.join(_a ).replace(_a , ''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str , _a:Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self:str , _a:List[str] , _a:str = "eng_Latn" , _a:Optional[List[str]] = None , _a:str = "fra_Latn" , **_a:Union[str, Any] , ):
snake_case__ = src_lang
snake_case__ = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE__ ( self:int ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
snake_case__ = self.lang_code_to_id[src_lang]
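        # Legacy behaviour: no prefix and suffix=[eos, src_lang_code];
        # default behaviour: prefix=[src_lang_code] and suffix=[eos].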
if self.legacy_behaviour:
snake_case__ = []
snake_case__ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ = [self.cur_lang_code]
snake_case__ = [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:str ):
snake_case__ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
snake_case__ = []
snake_case__ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ = [self.cur_lang_code]
snake_case__ = [self.eos_token_id]
| 33 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
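        # `replicate` copies the pipeline params to every local device and
        # `shard` splits the batch along a leading device axis for pmap.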
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 690 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''unispeech-sat'''
def __init__( self , lowerCamelCase_=3_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_="group" , lowerCamelCase_="gelu" , lowerCamelCase_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCamelCase_=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase_=(1_0, 3, 3, 3, 3, 2, 2) , lowerCamelCase_=False , lowerCamelCase_=1_2_8 , lowerCamelCase_=1_6 , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=0.05 , lowerCamelCase_=1_0 , lowerCamelCase_=2 , lowerCamelCase_=0.0 , lowerCamelCase_=1_0 , lowerCamelCase_=0 , lowerCamelCase_=3_2_0 , lowerCamelCase_=2 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0_0 , lowerCamelCase_=2_5_6 , lowerCamelCase_=2_5_6 , lowerCamelCase_=0.1 , lowerCamelCase_="mean" , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=2_5_6 , lowerCamelCase_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowerCamelCase_=(5, 3, 3, 1, 1) , lowerCamelCase_=(1, 2, 3, 1, 1) , lowerCamelCase_=5_1_2 , lowerCamelCase_=0 , lowerCamelCase_=1 , lowerCamelCase_=2 , lowerCamelCase_=5_0_4 , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_)
UpperCamelCase = hidden_size
UpperCamelCase = feat_extract_norm
UpperCamelCase = feat_extract_activation
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = conv_bias
UpperCamelCase = num_conv_pos_embeddings
UpperCamelCase = num_conv_pos_embedding_groups
UpperCamelCase = len(self.conv_dim)
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = feat_proj_dropout
UpperCamelCase = final_dropout
UpperCamelCase = layerdrop
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = vocab_size
UpperCamelCase = num_clusters
UpperCamelCase = do_stable_layer_norm
UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = apply_spec_augment
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase = num_codevectors_per_group
UpperCamelCase = num_codevector_groups
UpperCamelCase = contrastive_logits_temperature
UpperCamelCase = feat_quantizer_dropout
UpperCamelCase = num_negatives
UpperCamelCase = codevector_dim
UpperCamelCase = proj_codevector_dim
UpperCamelCase = diversity_loss_weight
# ctc loss
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase__ ( self) -> Optional[int]:
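        # Overall downsampling factor of the feature extractor:
        # the product of all convolutional strides.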
return functools.reduce(operator.mul , self.conv_stride , 1) | 34 |
"""simple docstring"""
import os
import sys
UpperCamelCase : Optional[int] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase : Dict = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
| 690 | 0 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True iff the side lengths in `nums` can form a polygon: the
    longest side must be strictly shorter than the sum of all the others."""
    if len(nums) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
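

# Hand-checked examples (computed directly from the rule above):
assert check_polygon([3, 4, 5])  # 5 < 3 + 4
assert not check_polygon([1, 1, 3])  # 3 >= 1 + 1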
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
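# Convert raw segmentation logits into one label map per image; when target_sizes is
# given, each logit map is bilinearly resized to its target size before the argmax.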
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 690 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[str] = '''timm_backbone'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = backbone
snake_case : Dict = num_channels
snake_case : Optional[int] = features_only
snake_case : Any = use_pretrained_backbone
snake_case : Union[str, Any] = True
snake_case : Optional[int] = out_indices if out_indices is not None else (-1,)
| 36 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
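# Scrape an Amazon India search-results page for the given product and collect title,
# link, price, rating, MRP and discount into a pandas DataFrame.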
def __snake_case ( UpperCamelCase__ = "laptop" ) -> DataFrame:
"""simple docstring"""
A = f'https://www.amazon.in/laptop/s?k={product}'
A = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36', # header values must not contain newlines
'Accept-Language': 'en-US, en;q=0.5',
}
A = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
A = item.ha.text
A = 'https://www.amazon.in/' + item.ha.a['href']
A = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
A = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
A = 'Not available'
try:
A = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
A = ''
try:
A = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
A = float('nan' )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = ' '
A = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 690 | 0 |
def UpperCamelCase_ ( __a , __a = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
a__ : Dict = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
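# entries equal to 1 act as skip markers: n < 1 is never true, so those prime counts
# are passed over in favour of the next larger bound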
a__ : Dict = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__a , 1 ):
if n < _p:
# then we have our last prime to check
a__ : Any = primes[:idx]
break
a__, a__ : List[str] = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
a__ : List[str] = False
for r in range(__a ):
a__ : List[str] = pow(__a , d * 2**r , __a )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
a__ : Union[str, Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
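# Self-test: each pair below exercises one deterministic bound — a known composite that
# must be rejected and a nearby prime that must be accepted.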
def UpperCamelCase_ ( ) -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 37 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
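# Tests for DonutImageProcessor: property presence, size-dict handling (including legacy
# (width, height) tuples), and pixel_values shapes for PIL, NumPy and torch inputs.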
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : int=7 , _lowercase : List[str]=3 , _lowercase : Tuple=18 , _lowercase : Dict=30 , _lowercase : Any=400 , _lowercase : int=True , _lowercase : List[Any]=None , _lowercase : Tuple=True , _lowercase : List[Any]=False , _lowercase : str=True , _lowercase : List[str]=True , _lowercase : int=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , ):
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size if size is not None else {'height': 18, 'width': 20}
A = do_thumbnail
A = do_align_axis
A = do_pad
A = do_normalize
A = image_mean
A = image_std
def __a ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
A = DonutImageProcessingTester(self )
@property
def __a ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowercase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowercase , 'do_pad' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
def __a ( self : int ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
A = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __a ( self : Any ):
pass
@is_flaky()
def __a ( self : int ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 690 | 0 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
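# Dataset input stream that builds a dataset from a user-supplied generator callable,
# either as a streaming dataset or fully materialized via download_and_prepare.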
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(
features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Tuple = Generator(
cache_dir=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , gen_kwargs=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self ):
# Build iterable dataset
if self.streaming:
snake_case__ : Dict = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
snake_case__ : Optional[Any] = None
snake_case__ : List[Any] = None
snake_case__ : Dict = None
snake_case__ : int = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
snake_case__ : Optional[int] = self.builder.as_dataset(
split="""train""" , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
| 38 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : int=2 , _lowercase : Optional[Any]=3 , _lowercase : Any=64 , _lowercase : Tuple=None ):
A = np.random.default_rng(_lowercase )
A = length
A = rng.normal(size=(length,) ).astype(np.floataa )
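# synthetic linear-regression data: y = a * x + b plus small gaussian noise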
A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ):
return self.length
def __getitem__( self : List[str] , _lowercase : int ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[int] , _lowercase : Any=0 , _lowercase : List[Any]=0 , _lowercase : Optional[int]=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = True
def __a ( self : Optional[Any] , _lowercase : str=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a[0] + self.b[0]
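# Same toy regression module, but with configurable scalar parameters a and b.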
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[Any] , _lowercase : Any=0 , _lowercase : List[str]=0 , _lowercase : str=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = True
def __a ( self : int , _lowercase : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a + self.b
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ = 16 ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
A = AutoTokenizer.from_pretrained('bert-base-cased' )
A = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
A = load_dataset('csv' , data_files=UpperCamelCase__ )
A = datasets['train'].unique('label' )
A = {v: i for i, v in enumerate(UpperCamelCase__ )}
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
if "label" in examples:
A = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=2 )
A = DataLoader(tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
| 690 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "dpt"
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=3_8_4 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=False , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=[2, 5, 8, 1_1] , _UpperCamelCase : List[str]="project" , _UpperCamelCase : Optional[int]=[4, 2, 1, 0.5] , _UpperCamelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , _UpperCamelCase : Dict=2_5_6 , _UpperCamelCase : Optional[Any]=-1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=0.4 , _UpperCamelCase : Tuple=2_5_5 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=[1, 1_0_2_4, 2_4, 2_4] , _UpperCamelCase : List[str]=[0, 1] , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : Dict , ) ->Any:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output | 39 |
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ ) -> list[int]: # This function is recursive
"""simple docstring"""
A = len(UpperCamelCase__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
A = array[0]
A = False
A = 1
A = []
while not is_found and i < array_length:
if array[i] < pivot:
A = True
A = [element for element in array[i:] if element >= array[i]]
A = longest_subsequence(UpperCamelCase__ )
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
A = temp_array
else:
i += 1
A = [element for element in array[1:] if element >= pivot]
A = [pivot, *longest_subsequence(UpperCamelCase__ )]
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( a__ , a__ ):
UpperCAmelCase__ : List[str] = "dinat"
UpperCAmelCase__ : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=[3, 4, 6, 5], SCREAMING_SNAKE_CASE_=[2, 4, 8, 16], SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], SCREAMING_SNAKE_CASE_=3.0, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1e-5, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = patch_size
UpperCamelCase : Optional[Any] = num_channels
UpperCamelCase : List[str] = embed_dim
UpperCamelCase : List[Any] = depths
UpperCamelCase : int = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = num_heads
UpperCamelCase : str = kernel_size
UpperCamelCase : str = dilations
UpperCamelCase : Union[str, Any] = mlp_ratio
UpperCamelCase : Any = qkv_bias
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase : List[str] = drop_path_rate
UpperCamelCase : int = hidden_act
UpperCamelCase : str = layer_norm_eps
UpperCamelCase : List[str] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase : Optional[Any] = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) )
UpperCamelCase : Optional[int] = layer_scale_init_value
UpperCamelCase : List[Any] = ['stem'] + [F"""stage{idx}""" for idx in range(1, len(SCREAMING_SNAKE_CASE_ ) + 1 )]
UpperCamelCase , UpperCamelCase : Optional[Any] = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_, out_indices=SCREAMING_SNAKE_CASE_, stage_names=self.stage_names )
| 40 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
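# Two equivalent implementations follow: a vectorized NumPy version and a pure-Python
# one; the __main__ block benchmarks both with timeit.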
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(UpperCamelCase__ ) - np.asarray(UpperCamelCase__ )) ** 2 ) )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(UpperCamelCase__ , UpperCamelCase__ ) ) ** (1 / 2)
if __name__ == "__main__":
def __snake_case ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
| 690 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 'switch_transformers'
SCREAMING_SNAKE_CASE : Union[str, Any] = ['past_key_values']
SCREAMING_SNAKE_CASE : Tuple = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Optional[Any] ,lowercase__ : Tuple=3_2_1_2_8 ,lowercase__ : str=7_6_8 ,lowercase__ : Any=6_4 ,lowercase__ : List[str]=2_0_4_8 ,lowercase__ : Union[str, Any]=6_4 ,lowercase__ : List[str]=1_2 ,lowercase__ : List[Any]=3 ,lowercase__ : str=1_2 ,lowercase__ : Optional[int]=3 ,lowercase__ : Union[str, Any]=1_2 ,lowercase__ : Any=8 ,lowercase__ : List[str]=False ,lowercase__ : Any=0.0_1 ,lowercase__ : str="float32" ,lowercase__ : List[str]=False ,lowercase__ : int=3_2 ,lowercase__ : str=1_2_8 ,lowercase__ : List[str]=0.1 ,lowercase__ : Dict=1e-6 ,lowercase__ : Dict=0.0_0_1 ,lowercase__ : Union[str, Any]=0.0_0_1 ,lowercase__ : Optional[Any]=1.0 ,lowercase__ : int="relu" ,lowercase__ : Any=True ,lowercase__ : int=False ,lowercase__ : List[Any]=True ,lowercase__ : Optional[Any]=0 ,lowercase__ : Union[str, Any]=1 ,**lowercase__ : List[Any] ,):
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_sparse_encoder_layers
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_sparse_decoder_layers
# This tells us how often (every how many layers) a sparse layer is inserted in the encoder.
if self.num_sparse_encoder_layers > 0:
__lowercase = self.num_layers // self.num_sparse_encoder_layers
else:
__lowercase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us how often (every how many layers) a sparse layer is inserted in the decoder.
if self.num_sparse_decoder_layers > 0:
__lowercase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__lowercase = self.num_decoder_layers # HACK: this will create 0 sparse layers
__lowercase = num_heads
__lowercase = num_experts
__lowercase = expert_capacity
__lowercase = router_bias
__lowercase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
__lowercase = router_dtype
__lowercase = router_ignore_padding_tokens
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = add_router_probs
__lowercase = router_z_loss_coef
__lowercase = router_aux_loss_coef
__lowercase = self.feed_forward_proj.split('''-''' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == '''gated'''
if len(lowercase__ ) > 1 and act_info[0] != "gated" or len(lowercase__ ) > 2:
raise ValueError(
F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__lowercase = '''gelu_new'''
super().__init__(
pad_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,**lowercase__ ,)
| 41 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCamelCase : List[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
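# onnx_export wraps torch.onnx.export; PyTorch < 1.11 still needs the since-deprecated
# use_external_data_format / enable_onnx_checker arguments.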
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False , ) -> Any:
"""simple docstring"""
output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
else:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
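# Export every Stable Diffusion sub-model (text encoder, UNet, VAE encoder/decoder and
# optional safety checker) to ONNX, then reassemble and sanity-load the ONNX pipeline.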
@torch.no_grad()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> str:
"""simple docstring"""
A = torch.float16 if fpaa else torch.float32 # half precision only when fp16 export was requested
if fpaa and torch.cuda.is_available():
A = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
A = 'cpu'
A = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=UpperCamelCase__ ).to(UpperCamelCase__ )
A = Path(UpperCamelCase__ )
# TEXT ENCODER
A = pipeline.text_encoder.config.max_position_embeddings
A = pipeline.text_encoder.config.hidden_size
A = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
A = pipeline.unet.config.in_channels
A = pipeline.unet.config.sample_size
A = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
A = str(unet_path.absolute().as_posix() )
A = os.path.dirname(UpperCamelCase__ )
A = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location='weights.pb' , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
# VAE ENCODER
A = pipeline.vae
A = vae_encoder.config.in_channels
A = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
A = lambda UpperCamelCase__ , UpperCamelCase__ : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
# VAE DECODER
A = pipeline.vae
A = vae_decoder.config.latent_channels
A = vae_decoder.config.out_channels
# forward only through the decoder part
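# note: the encoder and decoder wrappers both point at the same underlying pipeline.vae
# module, so reusing the encoder variable's .decode here is intentional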
A = vae_encoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
A = pipeline.safety_checker
A = safety_checker.config.vision_config.num_channels
A = safety_checker.config.vision_config.image_size
A = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
A = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
A = pipeline.feature_extractor
else:
A = None
A = None
A = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print('ONNX pipeline saved to' , UpperCamelCase__ )
del pipeline
del onnx_pipeline
A = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : str = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 690 | 0 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
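# Round-trip tests for the JSON/JSONL reader and writer: feature casting, split
# handling, orientations, multiprocessing and compression.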
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> int:
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]:
lowerCamelCase_ = tmp_path / 'cache'
lowerCamelCase_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase_ = JsonDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ,keep_in_memory=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize(
'features' ,[
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] ,)
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
lowerCamelCase_ = tmp_path / 'cache'
lowerCamelCase_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase_ = features.copy() if features else default_expected_features
lowerCamelCase_ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase_ = JsonDatasetReader(__UpperCamelCase ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize(
'features' ,[
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] ,)
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = tmp_path / 'cache'
lowerCamelCase_ = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
lowerCamelCase_ = features.copy() if features else default_expected_features
lowerCamelCase_ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase_ = JsonDatasetReader(__UpperCamelCase ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowerCamelCase_ = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
lowerCamelCase_ = features.copy()
lowerCamelCase_ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase_ = tmp_path / 'cache'
lowerCamelCase_ = JsonDatasetReader(__UpperCamelCase ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
lowerCamelCase_ = tmp_path / 'cache'
lowerCamelCase_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase_ = JsonDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ,split=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase ,__UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' ,[str, list] )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
if issubclass(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = jsonl_path
elif issubclass(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = [jsonl_path]
lowerCamelCase_ = tmp_path / 'cache'
lowerCamelCase_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase_ = JsonDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=("train",) ) -> Optional[int]:
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
for split in splits:
lowerCamelCase_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
lowerCamelCase_ = tmp_path / 'cache'
lowerCamelCase_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase_ = JsonDatasetReader({'train': jsonl_path} ,cache_dir=__UpperCamelCase ,keep_in_memory=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize(
'features' ,[
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] ,)
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
lowerCamelCase_ = tmp_path / 'cache'
lowerCamelCase_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase_ = features.copy() if features else default_expected_features
lowerCamelCase_ = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase_ = JsonDatasetReader({'train': jsonl_path} ,features=__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
if split:
lowerCamelCase_ = {split: jsonl_path}
else:
lowerCamelCase_ = 'train'
lowerCamelCase_ = {'train': jsonl_path, 'test': jsonl_path}
lowerCamelCase_ = tmp_path / 'cache'
lowerCamelCase_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase_ = JsonDatasetReader(__UpperCamelCase ,cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase ,__UpperCamelCase ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _UpperCamelCase ( __UpperCamelCase ) -> Union[str, Any]:
return json.load(__UpperCamelCase )
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
return [json.loads(line ) for line in buffer]
class UpperCAmelCase :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lines=SCREAMING_SNAKE_CASE_ ).write()
buffer.seek(0 )
lowerCamelCase_ = load_json_function(SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE_ )
assert len(SCREAMING_SNAKE_CASE_ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lines=SCREAMING_SNAKE_CASE_ , orient=SCREAMING_SNAKE_CASE_ ).write()
buffer.seek(0 )
lowerCamelCase_ = load_json(SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE_ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lines=SCREAMING_SNAKE_CASE_ , num_proc=2 ).write()
buffer.seek(0 )
lowerCamelCase_ = load_json_function(SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE_ )
assert len(SCREAMING_SNAKE_CASE_ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lines=SCREAMING_SNAKE_CASE_ , orient=SCREAMING_SNAKE_CASE_ , num_proc=2 ).write()
buffer.seek(0 )
lowerCamelCase_ = load_json(SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE_ ) == 10
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / f'''test.json.{extension}'''
lowerCamelCase_ = str(shared_datadir / f'''test_file.json.{extension}''' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compression=SCREAMING_SNAKE_CASE_ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE_ , 'rb' , compression='infer' ) as f:
lowerCamelCase_ = f.read()
with fsspec.open(SCREAMING_SNAKE_CASE_ , 'rb' , compression='infer' ) as f:
lowerCamelCase_ = f.read()
assert exported_content == original_content
| 42 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase : List[str] = Lock()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
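# NOTE: the swap count is hardcoded to 10 to match the 10-element demo list built in
# main(); in general it should equal len(arr)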
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A = min(UpperCamelCase__ , UpperCamelCase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A = max(UpperCamelCase__ , UpperCamelCase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCamelCase__ )
def __snake_case ( UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = []
A = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
A = temp_rs
A = temp_rr
for i in range(1 , len(UpperCamelCase__ ) - 1 ):
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
A = temp_rs
A = temp_rr
process_array_.append(
Process(
target=UpperCamelCase__ , args=(
len(UpperCamelCase__ ) - 1,
arr[len(UpperCamelCase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCamelCase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCamelCase__ ) ):
A = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __snake_case ( ) -> Optional[Any]:
"""simple docstring"""
A = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*UpperCamelCase__ )
A = odd_even_transposition(UpperCamelCase__ )
print('Sorted List\n' )
print(*UpperCamelCase__ )
if __name__ == "__main__":
main()
| 690 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
lowercase__ = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowercase__ = ''''''
else:
lowercase__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowercase__ = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[
: config.hidden_size, :
]
lowercase__ = in_proj_bias[: config.hidden_size]
lowercase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ = in_proj_bias[-config.hidden_size :]
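# Minimal sketch of the slicing performed above, on a toy fused qkv matrix.
# hidden_size=4 is an arbitrary assumption; in a real checkpoint the fused
# weight has shape (3 * hidden_size, hidden_size).
def _split_qkv_demo(hidden_size=4):
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    q = qkv[:hidden_size, :]
    k = qkv[hidden_size : hidden_size * 2, :]
    v = qkv[-hidden_size:, :]
    # the three slices reassemble into the original fused matrix
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)
    return q, k, v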
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = dct.pop(SCREAMING_SNAKE_CASE )
lowercase__ = val
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
lowercase__ = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=SCREAMING_SNAKE_CASE , )
lowercase__ = ViTHybridConfig(backbone_config=SCREAMING_SNAKE_CASE , image_size=3_84 , num_labels=10_00 )
lowercase__ = False
# load original model from timm
lowercase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase__ = timm_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE )
lowercase__ = create_rename_keys(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = '''huggingface/label-files'''
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowercase__ = ViTHybridModel(SCREAMING_SNAKE_CASE ).eval()
else:
lowercase__ = ViTHybridForImageClassification(SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE )
# create image processor
lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE ) )
lowercase__ = transform.transforms
lowercase__ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowercase__ = ViTHybridImageProcessor(
do_resize=SCREAMING_SNAKE_CASE , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase__ = prepare_img()
lowercase__ = transform(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
lowercase__ = processor(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# verify logits
with torch.no_grad():
lowercase__ = model(SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
lowercase__ = timm_model.forward_features(SCREAMING_SNAKE_CASE )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 )
else:
lowercase__ = timm_model(SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(f'Pushing model and processor to the hub {vit_name}' )
model.push_to_hub(f'ybelkada/{vit_name}' )
processor.push_to_hub(f'ybelkada/{vit_name}' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
lowerCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
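# Example invocation (illustrative; the script file name and dump path are
# assumptions, while the timm model name is the default registered above):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base \
#       --push_to_hub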
| 43 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
UpperCamelCase : int = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
UpperCamelCase : List[Any] = dataset.iloc[:, 1:2].values
UpperCamelCase : Any = dataset.iloc[:, 2].values
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = train_test_split(X, y, test_size=0.2, random_state=0)
UpperCamelCase : List[str] = PolynomialFeatures(degree=4)
UpperCamelCase : Optional[int] = poly_reg.fit_transform(X)
UpperCamelCase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
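# Equivalent sketch with an sklearn Pipeline, which bundles the polynomial
# expansion and the linear fit into a single estimator. Degree 4 mirrors the
# setup above; this block is illustrative and independent of the code below.
from sklearn.pipeline import make_pipeline

poly_model = make_pipeline(PolynomialFeatures(degree=4), LinearRegression())
poly_model.fit(X, y)
# poly_model.predict([[5.5]]) matches pol_reg.predict(poly_reg.fit_transform([[5.5]]))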
def __snake_case ( ) -> Optional[int]:
"""simple docstring"""
plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color='red' )
plt.plot(UpperCamelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase__ ) ) , color='blue' )
    plt.title('Truth or Bluff (Polynomial Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 690 | 0 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCAmelCase_ : Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase_ : Union[str, Any] = [0, 25, 50]
UpperCAmelCase_ : int = [25, 50, 75]
UpperCAmelCase_ : Dict = fuzz.membership.trimf(X, abca)
UpperCAmelCase_ : Union[str, Any] = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCAmelCase_ : Optional[int] = np.ones(75)
UpperCAmelCase_ : Optional[int] = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase_ : int = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase_ : List[Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
UpperCAmelCase_ : Optional[int] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCAmelCase_ : Any = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCAmelCase_ : str = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase_ : str = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
UpperCAmelCase_ : int = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
UpperCAmelCase_ : List[Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
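    # For reference before showing the figure (illustrative, not part of
    # scikit-fuzzy's API): the same set operations in plain numpy make the
    # max/min semantics explicit.
    union_np = np.maximum(young, middle_aged)                # max(µA, µB)
    intersection_np = np.minimum(young, middle_aged)         # min(µA, µB)
    complement_np = 1 - young                                # 1 - µA
    bounded_sum_np = np.minimum(1, young + middle_aged)      # min(1, µA + µB)
    bounded_diff_np = np.maximum(0, young - middle_aged)     # max(0, µA - µB)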
    plt.show()
| 44 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
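# Usage sketch (comment-only, since the class name above is an obfuscated
# placeholder; the image path is an assumption):
#
#   from PIL import Image
#   processor = <the image processor class above>()   # defaults: resize to 224, center-crop 224x224
#   batch = processor(Image.open("example.jpg"), return_tensors="pt")
#   batch["pixel_values"].shape   # -> (1, 3, 224, 224)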
| 690 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Union[str, Any] , ):
UpperCamelCase__ :Tuple = parent
UpperCamelCase__ :str = 13
UpperCamelCase__ :Union[str, Any] = 7
UpperCamelCase__ :List[Any] = 30
UpperCamelCase__ :int = self.seq_length + self.mem_len
UpperCamelCase__ :Tuple = 15
UpperCamelCase__ :int = True
UpperCamelCase__ :int = True
UpperCamelCase__ :Union[str, Any] = 99
UpperCamelCase__ :Any = [10, 50, 80]
UpperCamelCase__ :List[str] = 32
UpperCamelCase__ :Optional[Any] = 32
UpperCamelCase__ :int = 4
UpperCamelCase__ :Optional[int] = 8
UpperCamelCase__ :Tuple = 1_28
UpperCamelCase__ :List[Any] = 2
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :Dict = None
UpperCamelCase__ :List[Any] = 1
UpperCamelCase__ :Any = 0
UpperCamelCase__ :List[str] = 3
UpperCamelCase__ :Any = self.vocab_size - 1
UpperCamelCase__ :Optional[int] = 0.01
def __a ( self :Optional[Any] ):
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :int = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __a ( self :Optional[int] ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = TFTransfoXLModel(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :List[str] = model(lowerCamelCase__ ).to_tuple()
UpperCamelCase__ :int = {"""input_ids""": input_ids_a, """mems""": mems_a}
UpperCamelCase__ , UpperCamelCase__ :str = model(lowerCamelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __a ( self :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict ):
UpperCamelCase__ :Union[str, Any] = TFTransfoXLLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :str = model(lowerCamelCase__ ).to_tuple()
UpperCamelCase__ :Tuple = {"""input_ids""": input_ids_a, """labels""": lm_labels}
UpperCamelCase__ , UpperCamelCase__ :Any = model(lowerCamelCase__ ).to_tuple()
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
UpperCamelCase__ :str = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __a ( self :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :Union[str, Any] = TFTransfoXLForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Dict = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = config_and_inputs
UpperCamelCase__ :List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : str = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_snake_case : List[str] = () if is_tf_available() else ()
_snake_case : List[str] = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_snake_case : Optional[Any] = False
_snake_case : Any = False
_snake_case : Tuple = False
_snake_case : List[Any] = False
def __a ( self :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :str ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :Any = TFTransfoXLModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , d_embed=37 )
def __a ( self :str ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
self.model_tester.set_seed()
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowerCamelCase__ )
def __a ( self :str ):
self.model_tester.set_seed()
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ , UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :int = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
UpperCamelCase__ :Dict = model_class(lowerCamelCase__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
UpperCamelCase__ :Optional[Any] = model.get_output_embeddings()
assert isinstance(lowerCamelCase__ , tf.keras.layers.Layer )
UpperCamelCase__ :List[Any] = model.get_bias()
assert name is None
else:
UpperCamelCase__ :Union[str, Any] = model.get_output_embeddings()
assert x is None
UpperCamelCase__ :int = model.get_bias()
assert name is None
def __a ( self :int ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __a ( self :int ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :str = TFTransfoXLModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __a ( self :Union[str, Any] ):
pass
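# Illustrative sketch (not part of the test suite above): a minimal standalone
# forward pass with a tiny config. Every hyper-parameter below is an arbitrary
# assumption chosen to keep the model small; the kwargs mirror the tester's.
def _tiny_transfo_xl_demo():
    config = TransfoXLConfig(
        vocab_size=99, d_model=32, d_embed=32, n_head=4, d_head=8,
        d_inner=128, n_layer=2, mem_len=30, cutoffs=[10, 50, 80], div_val=2,
    )
    model = TFTransfoXLModel(config)
    # batch of 1, sequence length 7 of dummy token ids
    return model(tf.ones((1, 7), dtype=tf.int32))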
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __a ( self :str ):
UpperCamelCase__ :int = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCamelCase__ :Tuple = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCamelCase__ :str = model.generate(lowerCamelCase__ , max_length=2_00 , do_sample=lowerCamelCase__ )
        self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
| 45 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowercase )
try:
pickle.loads(pickle.dumps(_lowercase ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
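# Standalone sketch of the round-trip exercised above: accelerate wraps the
# optimizer during prepare(), and that wrapper must survive pickling. It uses
# only the imports already at the top of this file.
def _optimizer_pickle_roundtrip_demo():
    model = torch.nn.Linear(10, 10)
    optimizer = torch.optim.SGD(model.parameters(), 0.1)
    prepared = Accelerator().prepare(optimizer)
    restored = pickle.loads(pickle.dumps(prepared))
    AcceleratorState._reset_state()
    return restored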
| 690 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class A_ ( _a ):
lowerCAmelCase__ = 'unispeech-sat'
def __init__( self: List[str] ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Any=12 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[Any]=0.1 ,__lowerCAmelCase: Tuple=0.1 ,__lowerCAmelCase: str=0.0 ,__lowerCAmelCase: int=0.0 ,__lowerCAmelCase: List[Any]=0.1 ,__lowerCAmelCase: List[Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: Tuple=1e-5 ,__lowerCAmelCase: Tuple="group" ,__lowerCAmelCase: Optional[Any]="gelu" ,__lowerCAmelCase: List[Any]=(512, 512, 512, 512, 512, 512, 512) ,__lowerCAmelCase: Dict=(5, 2, 2, 2, 2, 2, 2) ,__lowerCAmelCase: Any=(10, 3, 3, 3, 3, 2, 2) ,__lowerCAmelCase: int=False ,__lowerCAmelCase: Union[str, Any]=128 ,__lowerCAmelCase: Optional[Any]=16 ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Optional[int]=0.05 ,__lowerCAmelCase: str=10 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Optional[int]=0.0 ,__lowerCAmelCase: Dict=10 ,__lowerCAmelCase: Dict=0 ,__lowerCAmelCase: int=320 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Any=100 ,__lowerCAmelCase: List[Any]=256 ,__lowerCAmelCase: Any=256 ,__lowerCAmelCase: Optional[int]=0.1 ,__lowerCAmelCase: List[str]="mean" ,__lowerCAmelCase: str=False ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: Any=256 ,__lowerCAmelCase: Optional[int]=(512, 512, 512, 512, 1_500) ,__lowerCAmelCase: Optional[Any]=(5, 3, 3, 1, 1) ,__lowerCAmelCase: Optional[Any]=(1, 2, 3, 1, 1) ,__lowerCAmelCase: Dict=512 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Optional[int]=504 ,**__lowerCAmelCase: Optional[Any] ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase ,pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : str = feat_extract_norm
_lowerCamelCase : int = feat_extract_activation
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : List[Any] = list(__lowerCAmelCase )
_lowerCamelCase : str = list(__lowerCAmelCase )
_lowerCamelCase : Dict = conv_bias
_lowerCamelCase : List[Any] = num_conv_pos_embeddings
_lowerCamelCase : int = num_conv_pos_embedding_groups
_lowerCamelCase : List[Any] = len(self.conv_dim )
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : List[Any] = hidden_dropout
_lowerCamelCase : int = attention_dropout
_lowerCamelCase : List[Any] = activation_dropout
_lowerCamelCase : Union[str, Any] = feat_proj_dropout
_lowerCamelCase : List[Any] = final_dropout
_lowerCamelCase : Union[str, Any] = layerdrop
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Any = num_clusters
_lowerCamelCase : int = do_stable_layer_norm
_lowerCamelCase : Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : List[Any] = apply_spec_augment
_lowerCamelCase : List[str] = mask_time_prob
_lowerCamelCase : Optional[int] = mask_time_length
_lowerCamelCase : str = mask_time_min_masks
_lowerCamelCase : Optional[int] = mask_feature_prob
_lowerCamelCase : Any = mask_feature_length
_lowerCamelCase : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCamelCase : int = num_codevectors_per_group
_lowerCamelCase : List[Any] = num_codevector_groups
_lowerCamelCase : List[str] = contrastive_logits_temperature
_lowerCamelCase : List[Any] = feat_quantizer_dropout
_lowerCamelCase : List[Any] = num_negatives
_lowerCamelCase : Dict = codevector_dim
_lowerCamelCase : List[Any] = proj_codevector_dim
_lowerCamelCase : int = diversity_loss_weight
# ctc loss
_lowerCamelCase : Tuple = ctc_loss_reduction
_lowerCamelCase : List[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : Any = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : Any = list(__lowerCAmelCase )
_lowerCamelCase : Any = list(__lowerCAmelCase )
_lowerCamelCase : List[str] = list(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = xvector_output_dim
@property
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
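        # Worked example (assuming the default strides (5, 2, 2, 2, 2, 2, 2)):
        # functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320,
        # i.e. one feature-extractor output frame spans 320 input samples.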
        return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 46 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """convbert"""
def __init__( self : Optional[int] , _lowercase : List[Any]=30_522 , _lowercase : List[str]=768 , _lowercase : Optional[Any]=12 , _lowercase : Any=12 , _lowercase : str=3_072 , _lowercase : List[str]="gelu" , _lowercase : Dict=0.1 , _lowercase : Dict=0.1 , _lowercase : Any=512 , _lowercase : List[str]=2 , _lowercase : Tuple=0.0_2 , _lowercase : List[Any]=1e-12 , _lowercase : List[str]=1 , _lowercase : Tuple=0 , _lowercase : Any=2 , _lowercase : Union[str, Any]=768 , _lowercase : str=2 , _lowercase : Any=9 , _lowercase : Union[str, Any]=1 , _lowercase : Dict=None , **_lowercase : Union[str, Any] , ):
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = embedding_size
A = head_ratio
A = conv_kernel_size
A = num_groups
A = classifier_dropout
class lowerCamelCase__ ( UpperCAmelCase_ ):
@property
def __a ( self : str ):
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
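# Usage sketch (comment-only, since the class names above are obfuscated
# placeholders):
#
#   config = <the ConvBERT config class above>()          # defaults mirror conv-bert-base
#   onnx_config = <the ONNX config class above>(config)   # task defaults to "default"
#   dict(onnx_config.inputs)["input_ids"]                 # -> {0: "batch", 1: "sequence"}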
| 690 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = '''glpn'''
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE__ : List[Any]=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE__ : str=[3_2, 6_4, 1_6_0, 2_5_6] , SCREAMING_SNAKE_CASE__ : Dict=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE__ : Tuple=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-6 , SCREAMING_SNAKE_CASE__ : int=6_4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_0 , SCREAMING_SNAKE_CASE__ : Any=-1 , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
__a : List[str] = num_channels
__a : Dict = num_encoder_blocks
__a : List[str] = depths
__a : Optional[int] = sr_ratios
__a : int = hidden_sizes
__a : str = patch_sizes
__a : Union[str, Any] = strides
__a : str = mlp_ratios
__a : Optional[int] = num_attention_heads
__a : List[Any] = hidden_act
__a : Any = hidden_dropout_prob
__a : List[Any] = attention_probs_dropout_prob
__a : str = initializer_range
__a : int = drop_path_rate
__a : int = layer_norm_eps
__a : Optional[Any] = decoder_hidden_size
__a : Dict = max_depth
__a : Union[str, Any] = head_in_index
| 47 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :int = 'deberta-v2'
def __init__( self : Tuple , __magic_name__ : List[Any]=128100 , __magic_name__ : Tuple=1536 , __magic_name__ : int=24 , __magic_name__ : List[Any]=24 , __magic_name__ : Dict=6144 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Tuple=512 , __magic_name__ : Dict=0 , __magic_name__ : Optional[int]=0.02 , __magic_name__ : Union[str, Any]=1E-7 , __magic_name__ : str=False , __magic_name__ : Any=-1 , __magic_name__ : List[str]=0 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=None , __magic_name__ : Any=0 , __magic_name__ : Optional[int]="gelu" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = relative_attention
lowerCAmelCase__ = max_relative_positions
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = position_biased_input
# Backwards compatibility
if type(__magic_name__ ) == str:
lowerCAmelCase__ = [x.strip() for x in pos_att_type.lower().split("|" )]
lowerCAmelCase__ = pos_att_type
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = kwargs.get("pooler_hidden_size" , __magic_name__ )
lowerCAmelCase__ = pooler_dropout
lowerCAmelCase__ = pooler_hidden_act
class A ( SCREAMING_SNAKE_CASE__ ):
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return 12
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional["TensorType"] = None , __magic_name__ : int = 3 , __magic_name__ : int = 40 , __magic_name__ : int = 40 , __magic_name__ : "PreTrainedTokenizerBase" = None , ):
"""simple docstring"""
lowerCAmelCase__ = super().generate_dummy_inputs(preprocessor=__magic_name__ , framework=__magic_name__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
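# Illustrative check of the backwards-compatibility branch in the config
# above: a "|"-separated string of attention types is split into a list.
# The value "c2p|p2c" is an assumption chosen for demonstration.
assert [x.strip() for x in "c2p|p2c".lower().split("|")] == ["c2p", "p2c"]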
| 48 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
A = {'+', '-', '*', '/'}
A = []
for token in postfix_notation:
if token in operations:
A , A = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCamelCase__ ) )
return stack.pop()
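# Worked example of the evaluator above (its def is obfuscated as
# ``__snake_case`` in this copy): the postfix expression "2 3 + 4 *"
# means (2 + 3) * 4 = 20, so
#
#   __snake_case(["2", "3", "+", "4", "*"])  # -> 20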
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowercase : str = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowercase : Any = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowercase : int = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = len([g for position, g in enumerate(snake_case_ ) if g == main_target[position]] )
return (item, float(snake_case_ ))
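# Worked example of the fitness function above (comment-only; the defs here
# are obfuscated, but call sites keep the original name ``evaluate``):
# evaluate("heLLo", "hello") counts position-wise matches — h, e, and o —
# so it returns ("heLLo", 3.0).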
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = random.randint(0 , len(snake_case_ ) - 1 )
__UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
__UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowercase__ ( snake_case_ :str , snake_case_ :list[str] ):
__UpperCAmelCase = list(snake_case_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__UpperCAmelCase = random.choice(snake_case_ )
return "".join(snake_case_ )
def lowercase__ ( snake_case_ :tuple[str, float] , snake_case_ :list[tuple[str, float]] , snake_case_ :list[str] , ):
__UpperCAmelCase = []
# Generate more children proportionally to the fitness score.
__UpperCAmelCase = int(parent_a[1] * 100 ) + 1
__UpperCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(snake_case_ ):
__UpperCAmelCase = population_score[random.randint(0 , snake_case_ )][0]
__UpperCAmelCase , __UpperCAmelCase = crossover(parent_a[0] , snake_case_ )
# Append new string to the population list.
pop.append(mutate(snake_case_ , snake_case_ ) )
pop.append(mutate(snake_case_ , snake_case_ ) )
return pop
def lowercase__ ( snake_case_ :str , snake_case_ :list[str] , snake_case_ :bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__UpperCAmelCase = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(snake_case_ )
# Verify that the target contains no genes besides the ones inside genes variable.
__UpperCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__UpperCAmelCase = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(snake_case_ )
# Generate random starting population.
__UpperCAmelCase = []
for _ in range(snake_case_ ):
population.append(''''''.join([random.choice(snake_case_ ) for i in range(len(snake_case_ ) )] ) )
# Just some logs to know what the algorithms is doing.
__UpperCAmelCase , __UpperCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(snake_case_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__UpperCAmelCase = [evaluate(snake_case_ , snake_case_ ) for item in population]
# Check if there is a matching evolution.
__UpperCAmelCase = sorted(snake_case_ , key=lambda snake_case_ : x[1] , reverse=snake_case_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping these avoids regression of the evolution.
__UpperCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(snake_case_ )
# Normalize population score to be between 0 and 1.
__UpperCAmelCase = [
(item, score / len(snake_case_ )) for item, score in population_score
]
# This is the selection step
for i in range(snake_case_ ):
population.extend(select(population_score[int(snake_case_ )] , snake_case_ , snake_case_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also solve small strings in
# far fewer generations.
if len(snake_case_ ) > N_POPULATION:
break
if __name__ == "__main__":
_lowercase : Optional[int] = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
_lowercase : Union[str, Any] = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
_lowercase ,_lowercase ,_lowercase : Optional[int] = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
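# A smaller smoke test (hypothetical call; the loop only returns once the best
# string equals the target, so a short target keeps this fast):
#
# gen_count, total_pop, best = basic("hello", list(" abcdefghijklmnopqrstuvwxyz"))
# assert best == "hello"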
| 49 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase : Any = None
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Optional[int] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCamelCase : str = "▁"
# Segments (not really needed)
UpperCamelCase : str = 0
UpperCamelCase : int = 1
UpperCamelCase : List[Any] = 2
UpperCamelCase : Union[str, Any] = 3
UpperCamelCase : Optional[Any] = 4
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = """left"""
lowerCAmelCase = XLNetTokenizer
def __init__( self : Tuple , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : int=False , _lowercase : Tuple=True , _lowercase : Union[str, Any]=False , _lowercase : int="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<unk>" , _lowercase : Optional[int]="<sep>" , _lowercase : int="<pad>" , _lowercase : Dict="<cls>" , _lowercase : str="<mask>" , _lowercase : List[str]=["<eop>", "<eod>"] , **_lowercase : Any , ):
# The mask token behaves like a normal word, i.e. includes the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
A = 3
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
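# Hedged usage sketch (this file mirrors transformers' XLNetTokenizerFast; the
# obfuscated class name stands in for it, and loading needs Hub access):
#
# tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# tokenizer("Hello world")["input_ids"]  # ends with <sep> <cls>, per the
#                                        # build_inputs_with_special_tokens logic above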
| 690 | 0 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : int = 100_0000 ):
lowerCamelCase__ = limit + 1
lowerCamelCase__ = [0] * limit
for first_term in range(1 , __lowerCAmelCase ):
for n in range(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 and a > d, also 4d < a
lowerCamelCase__ = sum(1 for x in frequency[1:limit] if x == 10 )
return count
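# Why the divisibility check works (sketch): for an arithmetic progression
# (x, y, z) = (a + d, a, a - d), n = x**2 - y**2 - z**2 = a * (4*d - a).
# Fixing a divisor a (first_term) of n gives 4*d = n / a + a, so that sum must
# be divisible by 4, with a > d (so z > 0) and a < 4*d (so n > 0).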
if __name__ == "__main__":
print(F'{solution() = }')
| 50 |
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : Any = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
A = 1
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
A = init[0]
A = init[1]
A = 0
A = g + heuristic[x][y] # cost from starting cell to destination cell
A = [[f, g, x, y]]
A = False # flag that is set when search is complete
A = False # flag set if we cannot expand any further
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
A = cell.pop()
A = next_cell[2]
A = next_cell[3]
A = next_cell[1]
if x == goal[0] and y == goal[1]:
A = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
A = x + DIRECTIONS[i][0]
A = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A = g + cost
A = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A = 1
A = i
A = []
A = goal[0]
A = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A = x - DIRECTIONS[action[x][y]][0]
A = y - DIRECTIONS[action[x][y]][1]
A = xa
A = ya
invpath.append([x, y] )
A = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCamelCase : Any = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase : List[Any] = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase : Tuple = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase : Union[str, Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase : List[str] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase : Dict = 99
UpperCamelCase , UpperCamelCase : Optional[Any] = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
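# Note: the heuristic built above is the Manhattan distance to the goal, with
# an extra penalty of 99 on obstacle cells; obstacles are never expanded, and
# with unit step cost the heuristic is admissible on free cells, so the search
# behaves like A* and should return a shortest unblocked path.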
| 690 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =VideoToVideoSDPipeline
_lowerCamelCase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
_lowerCamelCase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
_lowerCamelCase =PipelineTesterMixin.required_optional_params - {"latents"}
_lowerCamelCase =False
# No `output_type`.
_lowerCamelCase =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def __snake_case ( self : List[str] ):
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase = CLIPTextModel(a__ )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def __snake_case ( self : Any , a__ : int , a__ : int=0 ):
# 3 frames
UpperCAmelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
if str(a__ ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(a__ )
else:
UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''video''': video,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def __snake_case ( self : Dict ):
UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = VideoToVideoSDPipeline(**a__ )
UpperCAmelCase = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
UpperCAmelCase = self.get_dummy_inputs(a__ )
UpperCAmelCase = '''np'''
UpperCAmelCase = sd_pipe(**a__ ).frames
UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a__ , expected_max_diff=5e-3 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def __snake_case ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def __snake_case ( self : Any ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def __snake_case ( self : str ):
pass
def __snake_case ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : List[str] ):
UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase = torch.randn((1, 10, 3, 1024, 576) , generator=a__ )
UpperCAmelCase = video.to('''cuda''' )
UpperCAmelCase = '''Spiderman is surfing'''
UpperCAmelCase = pipe(a__ , video=a__ , generator=a__ , num_inference_steps=3 , output_type='''pt''' ).frames
UpperCAmelCase = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 51 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {"vocab_file": "sentencepiece.model"}
UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCamelCase : Union[str, Any] = {
"google/rembert": 256,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def __a ( self : Tuple ):
return len(self.sp_model )
def __a ( self : List[str] ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : List[str] , _lowercase : int ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ):
A = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def __a ( self : Dict , _lowercase : Tuple ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : str , _lowercase : Optional[int] ):
return self.sp_model.IdToPiece(_lowercase )
def __a ( self : Optional[int] , _lowercase : Optional[int] ):
A = self.sp_model.decode_pieces(_lowercase )
return out_string
def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
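# Hedged usage sketch (this mirrors transformers' RemBertTokenizer and needs
# the sentencepiece model referenced in the map above):
#
# tok = RemBertTokenizer.from_pretrained("google/rembert")
# tok("Hello world")["input_ids"]  # [CLS] ... [SEP], per the builder above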
| 690 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def __A ( a_ :str) -> List[Any]:
def decorator(a_ :List[Any]):
__a : List[str] = getattr(a_ , '''handle_key''' , [])
handle += [key]
setattr(a_ , '''handle_key''' , a_)
return func
return decorator
def __A ( *a_ :List[str]) -> Optional[int]:
def decorator(a_ :int):
__a : Tuple = getattr(a_ , '''handle_key''' , [])
handle += keys
setattr(a_ , '''handle_key''' , a_)
return func
return decorator
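# Both decorators simply tag the wrapped function with a `handle_key` list of
# key codes; the metaclass below collects those tags into `key_handler` so
# that `handle_input` can dispatch on the character read by get_character().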
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __new__( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Tuple = super().__new__(cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if not hasattr(_UpperCAmelCase , '''key_handler''' ):
setattr(_UpperCAmelCase , '''key_handler''' , {} )
setattr(_UpperCAmelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
__a : Dict = getattr(_UpperCAmelCase , '''handle_key''' , [] )
for key in handled_keys:
__a : Union[str, Any] = value
return new_cls
@staticmethod
def _lowerCamelCase ( cls ):
__a : Dict = get_character()
if char != KEYMAP["undefined"]:
__a : str = ord(_UpperCAmelCase )
__a : Tuple = cls.key_handler.get(_UpperCAmelCase )
if handler:
__a : Union[str, Any] = char
return handler(cls )
else:
return None
def __A ( cls :Union[str, Any]) -> str:
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy())
| 52 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCamelCase : Any = {"mobilebert-uncased": 512}
UpperCamelCase : Any = {}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Optional[int]=True , _lowercase : int="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Any="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : Any=None , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
A = getattr(_lowercase , normalizer_state.pop('type' ) )
A = do_lower_case
A = strip_accents
A = tokenize_chinese_chars
A = normalizer_class(**_lowercase )
A = do_lower_case
def __a ( self : List[Any] , _lowercase : Tuple , _lowercase : Any=None ):
A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
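# Hedged usage sketch (this mirrors transformers' MobileBertTokenizerFast):
#
# tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
# tok("hello world")["input_ids"]  # [CLS] tokens [SEP], see the builder above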
| 690 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = (UnCLIPScheduler,)
def lowercase ( self : str , **lowerCAmelCase_ : str ) -> str:
__lowerCAmelCase = {
'num_train_timesteps': 1_0_0_0,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowerCAmelCase_ )
return config
def lowercase ( self : Optional[Any] ) -> Optional[int]:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> List[Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def lowercase ( self : int ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> List[Any]:
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Any:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def lowercase ( self : int ) -> Tuple:
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCAmelCase_ , prev_timestep=lowerCAmelCase_ )
def lowercase ( self : Any ) -> Dict:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(variance_type='fixed_small_log' )
__lowerCAmelCase = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_54_96_25 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_99_49_87 ) ) < 1e-5
def lowercase ( self : List[Any] ) -> int:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(variance_type='learned_range' )
__lowerCAmelCase = scheduler_class(**lowerCAmelCase_ )
__lowerCAmelCase = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowerCAmelCase_ ) - -10.1_71_27_90 < 1e-5
assert scheduler._get_variance(4_8_7 , predicted_variance=lowerCAmelCase_ ) - -5.7_99_80_52 < 1e-5
assert scheduler._get_variance(9_9_9 , predicted_variance=lowerCAmelCase_ ) - -0.0_01_00_11 < 1e-5
def lowercase ( self : Dict ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**lowerCAmelCase_ )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase_ ):
# 1. predict noise residual
__lowerCAmelCase = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase_ ) )
__lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1e-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1e-3
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(2_5 )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase_ ):
# 1. predict noise residual
__lowerCAmelCase = model(lowerCAmelCase_ , lowerCAmelCase_ )
if i + 1 == timesteps.shape[0]:
__lowerCAmelCase = None
else:
__lowerCAmelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase = scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , prev_timestep=lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase_ ) )
__lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1e-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1e-3
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
pass
def lowercase ( self : Tuple ) -> Tuple:
pass
| 53 |
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if the new index's result extends the interval further to the right,
# we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if the value is at least the length of the pattern string,
# this index is the starting position of a substring
# equal to the pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
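# Worked example (deterministic; z_function is named by its call site above,
# while "find_pattern" is a hypothetical name for the last helper):
#
# z_function("abaabacaba") == [0, 0, 1, 3, 0, 1, 0, 3, 0, 1]
# find_pattern("aba", "abacaba") == 2  # "aba" occurs at indices 0 and 4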
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowercase : Optional[Any] =logging.get_logger(__name__)
class A ( __lowercase ):
def __init__( self: int , *_lowerCAmelCase: Optional[Any] , **_lowerCAmelCase: Dict ) -> None:
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 54 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 690 | 0 |
def UpperCAmelCase ( a_ , a_ ) -> int:
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 55 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
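# Pattern used throughout these tests (sketch): pipeline params are replicated
# across devices with flax.jax_utils.replicate, one RNG key per device comes
# from jax.random.split(key, jax.device_count()), and the batched prompts are
# split per device with flax.training.common_utils.shard before the jitted
# (pmapped) pipeline call.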
| 690 | 0 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowercase ( unittest.TestCase ):
def a ( self : Any ) -> Any:
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__snake_case = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = -1
__snake_case = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__snake_case = TextStreamer(SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> Any:
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__snake_case = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = -1
__snake_case = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(greedy_ids[0] )
__snake_case = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ )
__snake_case = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__snake_case = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
__snake_case = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> List[str]:
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__snake_case = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = -1
__snake_case = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
__snake_case = greedy_ids[:, input_ids.shape[1] :]
__snake_case = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__snake_case = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_prompt=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> int:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__snake_case = AutoTokenizer.from_pretrained('distilgpt2' )
__snake_case = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = -1
__snake_case = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__snake_case = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__snake_case = cs.out[:-1] # Remove the final "\n"
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : List[Any] ) -> Union[str, Any]:
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__snake_case = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = -1
__snake_case = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ , timeout=0.0_0_1 )
__snake_case = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__snake_case = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = ''
for new_text in streamer:
streamer_text += new_text
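# Pattern sketch: generation runs on a background Thread while the main thread
# consumes decoded text from the TextIteratorStreamer iterator; with a small
# `timeout`, an idle iterator raises queue.Empty (imported above), which the
# final test appears to assert.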
| 56 |
"""simple docstring"""
import os
import sys
UpperCamelCase : Optional[int] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase : Dict = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
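# Each wrapper above just forwards *args/**kwargs to the matching Auto* class,
# e.g. (hypothetical checkpoint name):
#
# model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")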
| 690 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="resnet50" , _lowerCamelCase=3 , _lowerCamelCase=3_2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , ):
UpperCamelCase_: str = parent
UpperCamelCase_: Optional[Any] = out_indices if out_indices is not None else [4]
UpperCamelCase_: int = stage_names
UpperCamelCase_: Tuple = out_features
UpperCamelCase_: Tuple = backbone
UpperCamelCase_: Union[str, Any] = batch_size
UpperCamelCase_: int = image_size
UpperCamelCase_: Dict = num_channels
UpperCamelCase_: Optional[Any] = use_pretrained_backbone
UpperCamelCase_: Optional[int] = is_training
def _a ( self ):
UpperCamelCase_: Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_: Optional[int] = self.get_config()
return config, pixel_values
def _a ( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = TimmBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_: int = model(_lowerCamelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def _a ( self ):
UpperCamelCase_: Optional[int] = self.prepare_config_and_inputs()
UpperCamelCase_ ,UpperCamelCase_: Optional[int] = config_and_inputs
UpperCamelCase_: Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Any =(TimmBackbone,) if is_torch_available() else ()
a : Any ={'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
a : Dict =False
a : Optional[int] =False
a : List[Any] =False
a : int =False
def _a ( self ):
UpperCamelCase_: int = TimmBackboneModelTester(self )
UpperCamelCase_: Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _a ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ):
UpperCamelCase_: str = 'resnet18'
UpperCamelCase_: Union[str, Any] = 'microsoft/resnet-18'
UpperCamelCase_: List[Any] = AutoBackbone.from_pretrained(_lowerCamelCase , use_timm_backbone=_lowerCamelCase )
UpperCamelCase_: Tuple = AutoBackbone.from_pretrained(_lowerCamelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCamelCase_: Any = AutoBackbone.from_pretrained(_lowerCamelCase , use_timm_backbone=_lowerCamelCase , out_indices=[1, 2, 3] )
UpperCamelCase_: Union[str, Any] = AutoBackbone.from_pretrained(_lowerCamelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def _a ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def _a ( self ):
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def _a ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def _a ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def _a ( self ):
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def _a ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def _a ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def _a ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def _a ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def _a ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def _a ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def _a ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def _a ( self ):
pass
@unittest.skip('Safetensors is not supported by timm.' )
def _a ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self ):
pass
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_: Union[str, Any] = model_class(_lowerCamelCase )
UpperCamelCase_: Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_: str = [*signature.parameters.keys()]
UpperCamelCase_: int = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: Optional[Any] = True
UpperCamelCase_: Optional[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCamelCase_: Union[str, Any] = self.all_model_classes[0]
UpperCamelCase_: Tuple = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Tuple = model(**_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCamelCase_: List[str] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCamelCase_: List[str] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_: List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: List[str] = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCamelCase_: Dict = copy.deepcopy(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = None
UpperCamelCase_: Union[str, Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Dict = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCamelCase_: Union[str, Any] = copy.deepcopy(_lowerCamelCase )
UpperCamelCase_: List[Any] = False
UpperCamelCase_: Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: List[Any] = model(**_lowerCamelCase ) | 57 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
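# Hedged standalone sketch of the post-processing above: logits are bilinearly
# resized to each requested target size, then argmax'd over the label channel.
# Shapes and the label count below are arbitrary assumptions.
if __name__ == "__main__":
    if is_torch_available():
        dummy_logits = torch.randn(2, 19, 128, 128)  # (batch, num_labels, h, w)
        for idx, size in enumerate([(512, 512), (480, 640)]):
            resized = torch.nn.functional.interpolate(
                dummy_logits[idx].unsqueeze(dim=0), size=size, mode='bilinear', align_corners=False)
            print(resized[0].argmax(dim=0).shape)  # torch.Size([512, 512]), then ([480, 640])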
| 690 | 0 |
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    '''simple docstring'''
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    '''simple docstring'''
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
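    # Hedged demo (uses the functions defined above): seeding `random` makes the
    # sample reproducible; an undirected G(n, p) graph has about p * n * (n - 1) / 2
    # edges on average.
    random.seed(0)
    print(random_graph(5, 0.5))   # adjacency-list dict for an undirected G(5, 0.5) sample
    print(complete_graph(3))      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}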
| 58 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def __snake_case ( UpperCamelCase__ = "laptop" ) -> DataFrame:
"""simple docstring"""
A = f'https://www.amazon.in/laptop/s?k={product}'
A = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
A = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
A = item.ha.text
A = 'https://www.amazon.in/' + item.ha.a['href']
A = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
A = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
A = 'Not available'
try:
A = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
A = ''
try:
A = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
A = float('nan' )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = ' '
A = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
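    # Hedged follow-up: quick sanity checks on the scraped table (the exact rows
    # depend on live Amazon markup, which changes frequently).
    frame = get_amazon_product_data(product)
    print(frame.shape)                     # (rows, 6) given the six columns above
    print(frame['Product Title'].head())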
| 690 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    '''simple docstring'''

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        '''simple docstring'''
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(**inputs)

    def decode(self, outputs):
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
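# Hedged usage sketch: `PipelineTool` instances are callable end-to-end
# (encode -> forward -> decode); the checkpoint named above is downloaded on
# first use, and "photo.jpg" is a placeholder path.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageCaptioningTool()
    print(tool(Image.open("photo.jpg")))   # e.g. "a dog sitting on a couch"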
| 59 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
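# Hedged note: a standard unittest entry point so the file can also be run
# directly (upstream, these tests are normally collected with pytest).
if __name__ == "__main__":
    unittest.main()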
| 690 | 0 |
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        '''simple docstring'''
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
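# Hedged usage sketch, kept as comments because a real `Trainer` needs a model
# and `TrainingArguments` (both placeholders here):
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         trainer = SageMakerTrainer(args=training_args, model=model)
#     assert caught[0].category is FutureWarning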
| 60 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
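# Hedged demo: fit the tiny regression model above on the synthetic dataset with
# plain SGD; hyperparameters are arbitrary, and the fit should approach a=2, b=3.
if __name__ == "__main__":
    dataset = RegressionDataset(a=2, b=3, length=64, seed=42)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    model = RegressionModel(a=0.0, b=0.0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(50):
        for batch in loader:
            loss = ((model(batch["x"]) - batch["y"]) ** 2).mean()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
    print(model.a.item(), model.b.item())  # should land close to 2.0 and 3.0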
| 690 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
UpperCamelCase = True
except ImportError:
UpperCamelCase = False
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _A ( lowerCAmelCase_ : Namespace ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
@staticmethod
def a ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> int:
lowerCAmelCase__ = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=SCREAMING_SNAKE_CASE__ , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=SCREAMING_SNAKE_CASE__ , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , *SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]:
lowerCAmelCase__ = testing
lowerCAmelCase__ = testing_file
lowerCAmelCase__ = path
def a ( self : Union[str, Any] ) -> Tuple:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCAmelCase__ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
lowerCAmelCase__ = (
Path(SCREAMING_SNAKE_CASE__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCAmelCase__ = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(SCREAMING_SNAKE_CASE__ ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=SCREAMING_SNAKE_CASE__ , extra_context=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = configuration["lowercase_modelname"]
lowerCAmelCase__ = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
lowerCAmelCase__ = "PyTorch" in generate_tensorflow_pytorch_and_flax
lowerCAmelCase__ = "TensorFlow" in generate_tensorflow_pytorch_and_flax
lowerCAmelCase__ = "Flax" in generate_tensorflow_pytorch_and_flax
lowerCAmelCase__ = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=SCREAMING_SNAKE_CASE__ )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , "w" ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(SCREAMING_SNAKE_CASE__ : Dict ):
with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
lowerCAmelCase__ = f.readlines()
with open(SCREAMING_SNAKE_CASE__ , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(SCREAMING_SNAKE_CASE__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
# Create temp file
lowerCAmelCase__ , lowerCAmelCase__ = mkstemp()
lowerCAmelCase__ = False
with fdopen(SCREAMING_SNAKE_CASE__ , "w" ) as new_file:
with open(SCREAMING_SNAKE_CASE__ ) as old_file:
for line in old_file:
new_file.write(SCREAMING_SNAKE_CASE__ )
if line_to_copy_below in line:
lowerCAmelCase__ = True
for line_to_copy in lines_to_copy:
new_file.write(SCREAMING_SNAKE_CASE__ )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Remove original file
remove(SCREAMING_SNAKE_CASE__ )
# Move new file
move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def skip_units(SCREAMING_SNAKE_CASE__ : List[Any] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(SCREAMING_SNAKE_CASE__ : List[Any] ):
with open(SCREAMING_SNAKE_CASE__ ) as datafile:
lowerCAmelCase__ = []
lowerCAmelCase__ = False
lowerCAmelCase__ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCAmelCase__ = line.split("\"" )[1]
lowerCAmelCase__ = skip_units(SCREAMING_SNAKE_CASE__ )
elif "# Below: " in line and "##" not in line:
lowerCAmelCase__ = line.split("\"" )[1]
lowerCAmelCase__ = skip_units(SCREAMING_SNAKE_CASE__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = []
elif "# Replace with" in line and "##" not in line:
lowerCAmelCase__ = []
elif "##" not in line:
lines_to_copy.append(SCREAMING_SNAKE_CASE__ )
remove(SCREAMING_SNAKE_CASE__ )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(SCREAMING_SNAKE_CASE__ )
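# Hedged usage sketch, kept as comments (the names below are the upstream
# transformers equivalents of the style-mangled definitions in this file):
#
#     parser = ArgumentParser("transformers-cli", usage="transformers-cli <command> [<args>]")
#     commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
#     AddNewModelCommand.register_subcommand(commands_parser)
#     args = parser.parse_args(["add-new-model", "--testing", "--testing_file", "config.json"])
#     args.func(args).run()   # executes the (deprecated) cookiecutter flow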
| 61 |
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """simple docstring"""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
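    # Hedged check (worst case is exponential, so keep inputs small): one longest
    # non-decreasing subsequence of the classic example below has length 6.
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # [10, 22, 33, 41, 60, 80]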
| 690 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def _A ( self : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ):
if latents is None:
SCREAMING_SNAKE_CASE : List[str] = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
SCREAMING_SNAKE_CASE : Tuple = latents.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = latents * scheduler.init_noise_sigma
return latents
def _A ( self : List[str] , UpperCAmelCase_ : Optional[int]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
SCREAMING_SNAKE_CASE : Tuple = torch.device(f'''cuda:{gpu_id}''' )
SCREAMING_SNAKE_CASE : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : int , UpperCAmelCase_ : str=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
SCREAMING_SNAKE_CASE : Tuple = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=UpperCAmelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE : Optional[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = cpu_offload_with_hook(UpperCAmelCase_ , UpperCAmelCase_ , prev_module_hook=UpperCAmelCase_ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _A ( self : List[Any] ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self : int , UpperCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : float = 4.0 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ):
SCREAMING_SNAKE_CASE : str = self._execution_device
SCREAMING_SNAKE_CASE : str = guidance_scale > 1.0
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Dict = torch.cat(UpperCAmelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : List[str] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(UpperCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE : Dict = image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Any = negative_image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase_ )
self.scheduler.set_timesteps(UpperCAmelCase_ , device=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = self.unet.config.in_channels
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = downscale_height_and_width(UpperCAmelCase_ , UpperCAmelCase_ , self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE : List[str] = {"image_embeds": image_embeds}
SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet(
sample=UpperCAmelCase_ , timestep=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , added_cond_kwargs=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ , )[0]
# post-processing
SCREAMING_SNAKE_CASE : List[str] = self.movq.decode(UpperCAmelCase_ , force_not_quantize=UpperCAmelCase_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE : Dict = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE : Dict = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : List[Any] = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase_ )
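# Hedged demo of the latent-size helper above: sizes are snapped up to the next
# multiple of scale_factor**2 (64 for the default factor of 8), and the helper
# returns the corresponding UNet latent resolution.
if __name__ == "__main__":
    print(downscale_height_and_width(768, 768))  # (96, 96): 768 is already a multiple of 64
    print(downscale_height_and_width(700, 700))  # (88, 88): 700 rounds up to 704 pixels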
| 62 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
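    # Hedged sanity check: both implementations agree on a 3-4-5 right triangle.
    assert euclidean_distance((0, 0), (3, 4)) == 5.0
    assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0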
| 690 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(""",""")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(""":""")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
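# Hedged demo: wire a linear warmup/decay schedule onto a throwaway optimizer
# and inspect the learning-rate trajectory (all hyperparameters are arbitrary).
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=5, num_training_steps=20)
    lrs = []
    for _ in range(20):
        optimizer.step()
        scheduler.step()
        lrs.append(round(optimizer.param_groups[0]["lr"], 4))
    print(lrs)  # ramps up over 5 steps, then decays linearly toward 0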
| 63 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ):
    """simple docstring"""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        'A sample prompt', padding='max_length', max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / 'text_encoder' / 'model.onnx', ordered_input_names=['input_ids'], output_names=['last_hidden_state', 'pooler_output'], dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        }, opset=opset, )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ), output_path=unet_path, ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'], output_names=['out_sample'], dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        }, opset=opset, use_external_data_format=True, )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location='weights.pb', convert_attribute=False, )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / 'vae_encoder' / 'model.onnx', ordered_input_names=['sample', 'return_dict'], output_names=['latent_sample'], dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        }, opset=opset, )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        }, opset=opset, )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(
                    1, clip_num_channels, clip_image_size, clip_image_size, ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ), output_path=output_path / 'safety_checker' / 'model.onnx', ordered_input_names=['clip_input', 'images'], output_names=['out_images', 'has_nsfw_concepts'], dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            }, opset=opset, )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker')
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder'), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder'), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder'), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet'), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None, )
    onnx_pipeline.save_pretrained(output_path)
    print('ONNX pipeline saved to', output_path)
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider='CPUExecutionProvider')
    print('ONNX pipeline is loadable')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
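
# Example invocation (a sketch: the script filename and model id below are
# placeholders, not taken from this file):
#   python convert_stable_diffusion_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14
# The loadability check at the end of convert_models reopens the export with the
# CPUExecutionProvider, which is also how downstream inference would load it.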
| 690 | 0 |
def heaps(arr: list) -> list:
    """Generate all permutations of a list with Heap's algorithm,
    returned as a list of tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
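
# A quick worked example (a sketch): Heap's algorithm emits all n! orderings while
# mutating the list in place between emissions.
#   heaps([1, 2, 3])
#   -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]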
| 64 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """The function run by each worker process; sorts one list position."""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Creates one process per list element and runs the parallel swaps."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
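
# For reference, a sequential odd-even transposition sort (a sketch added here,
# not part of the original module) that the process-per-element version above
# parallelizes; after n phases the list is guaranteed sorted.
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr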
| 690 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs)
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
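
# Usage sketch (an illustration, assuming the class is registered with fsspec,
# e.g. fsspec.register_implementation("gzip", GzipFileSystem)):
#   fs = GzipFileSystem(fo="path/to/file.txt.gz")
#   fs.ls("/")                  # -> info for "file.txt", the single decompressed member
#   data = fs.cat("file.txt")   # bytes of the decompressed content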
| 65 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
# output should be 132148.43750003
| 690 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
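
# Usage sketch of the processor under test (the model id is illustrative):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="np")
#   sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']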
| 66 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
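
# Usage sketch: the default pipeline resizes the short side to 224, center-crops
# to 224x224, rescales by 1/255, and normalizes with the OpenAI CLIP statistics.
#   image_processor = CLIPImageProcessor()
#   out = image_processor(images=pil_image, return_tensors="np")
#   out["pixel_values"].shape  # (1, 3, 224, 224)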
| 690 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
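
# Note (a sketch of the behavior, not part of the original module): with the lazy
# structure above, `import transformers.models.albert` stays cheap; the heavy
# torch/TF/Flax submodules listed in `_import_structure` are only imported the
# first time a name such as `AlbertModel` is actually accessed.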
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 690 | 0 |
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as a long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Adds new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compresses the given bit string with the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Adds the file's length (Elias-gamma style) in front of the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given string of 0's and 1's as padded bytes into the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, compresses it, and writes the result to destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
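
# Example invocation (a sketch; the script filename is a placeholder):
#   python lzw_compress.py source.bin compressed.lzw
# compress() reads the source as a bit string, LZW-encodes it against a growing
# lexicon of bit prefixes, prepends the Elias-gamma-style length header, and
# writes the padded result byte by byte.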
| 68 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
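
# Usage sketch: the dynamic axes above let a single exported ONNX graph accept
# any batch size and sequence length.
#   onnx_config = ConvBertOnnxConfig(ConvBertConfig())
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'token_type_ids']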
| 690 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 69 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Releases memory from `objects` by setting them to `None` and emptying the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    A decorator that will try to execute `function` with `starting_batch_size`; if an
    out-of-memory-style exception is raised, the batch size is halved and retried.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
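
# A usage sketch of the decorator above (the training-loop body is illustrative):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the loop
#
#   train()  # retries with 64, 32, ... whenever an OOM-style error is raised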
| 70 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that rounds toward zero for mixed signs
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
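
# Worked example (a sketch): ["2", "1", "+", "3", "*"] evaluates as (2 + 1) * 3 = 9;
# ["15", "7", "1", "1", "+", "-", "/", "3", "*"] as 15 / (7 - (1 + 1)) * 3 = 9,
# where "/" rounds toward zero as implemented above.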
| 690 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak (bell) biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
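
# Usage sketch: build a 1 kHz low-pass biquad at 48 kHz and feed it one sample
# (IIRFilter.process is assumed from the accompanying iir_filter module):
#   filt = make_lowpass(1000, 48000)
#   out = filt.process(0.5)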
| 71 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase : Any = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
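

if __name__ == "__main__":
    # Minimal usage sketch (added, not part of the original module; assumes the
    # transformers package and the published checkpoint are available). XLNet pads
    # on the left and appends <sep> <cls> at the end instead of prepending [CLS].
    tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    encoded = tokenizer("Hello world", return_token_type_ids=True)
    print(encoded["input_ids"])
    print(encoded["token_type_ids"])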
"""CANINE model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
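

if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; this module normally cannot be
    # run as a script because of its relative imports, so this assumes the installed
    # transformers package). CANINE is tokenization-free: there is no vocab_size,
    # and max_position_embeddings counts Unicode code points.
    from transformers import CanineModel

    config = CanineConfig(num_hidden_layers=2)  # a small variant for quick experiments
    model = CanineModel(config)
    print(model.config.downsampling_rate)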
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : Any = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
A = 1
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
A = init[0]
A = init[1]
A = 0
A = g + heuristic[x][y] # cost from starting cell to destination cell
A = [[f, g, x, y]]
A = False # flag that is set when search is complete
A = False # flag set if we can't find expand
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
A = cell.pop()
A = next_cell[2]
A = next_cell[3]
A = next_cell[1]
if x == goal[0] and y == goal[1]:
A = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
A = x + DIRECTIONS[i][0]
A = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A = g + cost
A = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A = 1
A = i
A = []
A = goal[0]
A = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A = x - DIRECTIONS[action[x][y]][0]
A = y - DIRECTIONS[action[x][y]][1]
A = xa
A = ya
invpath.append([x, y] )
A = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCamelCase : Any = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase : List[Any] = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase : Tuple = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase : Union[str, Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase : List[str] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase : Dict = 99
UpperCamelCase , UpperCamelCase : Optional[Any] = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
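

if __name__ == "__main__":
    # Minimal usage sketch (added; assumes the installed transformers package and
    # sentencepiece): round-trip a sentence through the published checkpoint.
    tok = RemBertTokenizer.from_pretrained("google/rembert")
    ids = tok.encode("RemBERT rebalances input and output embeddings.")
    print(ids)
    print(tok.decode(ids))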
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
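

if __name__ == "__main__":
    # Minimal usage sketch (added; assumes the installed transformers package, since
    # this module's relative imports prevent direct script execution): build a
    # randomly initialised Swin backbone and check the derived hidden size.
    from transformers import SwinModel

    config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    model = SwinModel(config)
    print(config.hidden_size)  # 96 * 2**3 == 768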
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
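

if __name__ == "__main__":
    # Minimal usage sketch (added; assumes the installed transformers package): the
    # fast tokenizer mirrors BERT's WordPiece behaviour with [CLS] ... [SEP] framing.
    tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    print(tok("MobileBERT runs on phones.")["input_ids"])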
"""
Project Euler Problem 117: https://projecteuler.net/problem=117

Count the ways a row of `length` units can be tiled with black square tiles
(1 unit) and oblong red (2), green (3) or blue (4) tiles, in any combination.
"""


def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
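    # Quick check (added): "abr" occurs twice in "abracadabra" (indices 0 and 7),
    # so find_pattern should report 2.
    assert find_pattern("abr", "abracadabra") == 2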
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
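

if __name__ == "__main__":
    # Minimal usage sketch (added; assumes the installed datasets package): these
    # classes back Dataset.to_json() and load_dataset("json", ...). The round trip
    # below exercises both directions with a throwaway local file.
    ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
    JsonDatasetWriter(ds, "tmp.jsonl", lines=True).write()
    reloaded = JsonDatasetReader("tmp.jsonl", split="train").read()
    print(reloaded[0])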
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
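

if __name__ == "__main__":
    # Minimal usage sketch (added): the checkpoint below is the one exercised by the
    # slow tests above; a GPU is effectively required at the 256x256 output size.
    pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
    image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
    image.save("ldm_squirrel.png")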
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a__ ( __magic_name__ ):
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : str = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(UpperCamelCase_ , "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
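

if __name__ == "__main__":
    # Minimal usage sketch (added), mirroring the image-classification integration
    # test above with the same published checkpoint.
    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
    model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])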
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
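

if __name__ == "__main__":
    # Minimal usage sketch (added): data-parallel bfloat16 sampling across all
    # available JAX devices, following the pattern of the tests above. Treat the
    # revision/dtype combination as an assumption about the published checkpoint.
    pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
    )
    num_samples = jax.device_count()
    prompt_ids = shard(pipeline.prepare_inputs(num_samples * ["a photo of an astronaut riding a horse"]))
    rng = jax.random.split(jax.random.PRNGKey(0), num_samples)
    images = pipeline(prompt_ids, replicate(params), rng, jit=True).images
    print(images.shape)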
"""Convert a fairseq Hubert checkpoint to the transformers format."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
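# `set_recursively` is a dotted-path setter over nn.Module attributes. A toy, runnable
# illustration of the traversal it performs (the module and path are made up):
def _dotted_path_demo():
    model = torch.nn.Sequential(torch.nn.Linear(2, 2))
    pointer = model
    for attribute in "0".split("."):  # walk to submodule "0", then set its weight
        pointer = getattr(pointer, attribute)
    pointer.weight.data = torch.zeros(2, 2)
    assert model[0].weight.sum().item() == 0.0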
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
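# The weight_type dispatch above inspects only the parameter name, and the order matters
# ("weight_g"/"weight_v" must be tested before the bare "weight"). A tiny runnable demo
# of that classification, factored out for clarity (helper name is ours):
def _classify_weight_type(name):
    for weight_type in ("weight_g", "weight_v", "weight", "bias"):
        if weight_type in name:
            return weight_type
    return None


# _classify_weight_type("encoder.pos_conv.0.weight_g") -> "weight_g"
# _classify_weight_type("encoder.layers.0.fc1.bias")   -> "bias"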
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
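# The conv branch keys off two integers parsed from names shaped like
# "conv_layers.<layer_id>.<type_id>.<param>": type_id 0 selects the conv weight/bias,
# type_id 2 the layer norm. A quick runnable illustration of the parse (the name below
# is a hypothetical fairseq key):
def _parse_conv_name_demo():
    full_name = "feature_extractor.conv_layers.2.0.weight"
    items = full_name.split("conv_layers.")[-1].split(".")
    layer_id, type_id = int(items[0]), int(items[1])
    assert (layer_id, type_id) == (2, 0)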
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''simple docstring'''
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
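# A hedged example of driving the converter from Python instead of the CLI; the
# checkpoint and output paths below are placeholders, not real files:
def _example_conversion():
    convert_hubert_checkpoint(
        "/path/to/hubert_base_ls960.pt",  # fairseq checkpoint (placeholder)
        "/path/to/hubert-base-hf",        # output directory (placeholder)
        config_path=None,                 # fall back to the default HubertConfig
        dict_path=None,
        is_finetuned=False,               # pretrained-only checkpoint -> HubertModel
    )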
| 78 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """simple docstring"""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """simple docstring"""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """simple docstring"""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """simple docstring"""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """simple docstring"""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
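# This file has the shape of a torch.hub entry point (a `dependencies` list plus named
# loader functions). Assuming it is consumed the way transformers' hubconf is, usage
# through torch.hub would look like this sketch (network access assumed):
def _example_hub_usage():
    import torch

    tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
    mdl = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")
    inputs = tok("Hello world", return_tensors="pt")
    return mdl(**inputs)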
| 690 | 0 |
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    '''simple docstring'''
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    '''simple docstring'''
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    '''simple docstring'''
    return requests.get(URL_BASE + "onecall", params=locals()).json()
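# The helpers return OpenWeatherMap's raw JSON. A sketch of pulling a reading out of the
# /weather payload (field names per the 2.5 API; temperatures come back in Kelvin unless
# a `units` parameter is sent):
def _example_current_temperature(q="Chicago"):
    data = current_weather(q)
    if data.get("cod") != 200:  # the payload carries an HTTP-style status code
        raise RuntimeError(data.get("message", "request failed"))
    celsius = data["main"]["temp"] - 273.15
    return f"{data['name']}: {celsius:.1f} C, {data['weather'][0]['description']}"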
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 79 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class lowerCamelCase__ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits'
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
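# Typical end-to-end use of the processor above: `preprocess` (reached via __call__)
# turns raw images into pixel_values, and post_process_semantic_segmentation maps model
# logits back to per-pixel label maps. A sketch; torch availability is assumed, and the
# random image and fake logits stand in for real data:
def _processor_usage_sketch():
    import numpy as np

    processor = lowerCamelCase__()  # the image processor class defined above
    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    batch = processor(images=image, return_tensors="pt")
    # (1, 3, 224, 224) after shortest-edge resize to 256 and a 224x224 center crop.
    pixel_values = batch["pixel_values"]

    # Pretend logits from a segmentation head: (batch, num_labels, h, w).
    class _FakeOutputs:
        logits = torch.randn(1, 21, 56, 56)

    maps = processor.post_process_semantic_segmentation(_FakeOutputs(), target_sizes=[(480, 640)])
    return pixel_values.shape, maps[0].shape  # ((1, 3, 224, 224), (480, 640))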
| 690 | 0 |