| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
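Each row pairs a `code` snippet with a `style_context` snippet, plus an integer style-class id for each and a binary `label` (presumably 1 when the two snippets share the same code style, 0 otherwise). A minimal loading sketch with the `datasets` library is shown below; the dataset identifier is a placeholder, since the real repository id is not given on this page.

```python
# Sketch only: "user/code-style-pairs" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")

for row in ds.select(range(3)):
    # Each row exposes the five columns described in the table above.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])
```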
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
def get_matched_characters(_lowerCamelCase : str ,_lowerCamelCase : str ) -> str:
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[Any] = min(len(_stra ) ,len(_stra ) ) // 2
for i, l in enumerate(_stra ):
_lowerCAmelCase : Tuple = int(max(0 ,i - limit ) )
_lowerCAmelCase : Union[str, Any] = int(min(i + limit + 1 ,len(_stra ) ) )
if l in _stra[left:right]:
matched.append(_lowerCamelCase )
_lowerCAmelCase : List[str] = f"{_stra[0:_stra.index(_lowerCamelCase )]} {_stra[_stra.index(_lowerCamelCase ) + 1:]}"
return "".join(_lowerCamelCase )
# matching characters
_lowerCAmelCase : Any = get_matched_characters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_matched_characters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = len(_lowerCamelCase )
# transposition
_lowerCAmelCase : Union[str, Any] = (
len([(ca, ca) for ca, ca in zip(_lowerCamelCase ,_lowerCamelCase ) if ca != ca] ) // 2
)
if not match_count:
_lowerCAmelCase : int = 0.0
else:
_lowerCAmelCase : Optional[Any] = (
1
/ 3
* (
match_count / len(_lowerCamelCase )
+ match_count / len(_lowerCamelCase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
_lowerCAmelCase : Dict = 0
for ca, ca in zip(stra[:4] ,stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a : str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> List[str]:
_lowerCAmelCase : List[Any] = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
_lowerCAmelCase : List[Any] = 1024
_lowerCAmelCase : Tuple = 4096
_lowerCAmelCase : Optional[int] = 24
_lowerCAmelCase : Any = 16
_lowerCAmelCase : Any = [5, 11, 17, 23]
_lowerCAmelCase : List[str] = [256, 512, 1024, 1024]
_lowerCAmelCase : Dict = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowerCAmelCase : Tuple = 768
_lowerCAmelCase : List[str] = [1, 1, 1, 0.5]
_lowerCAmelCase : int = [256, 512, 768, 768]
_lowerCAmelCase : Optional[Any] = 150
_lowerCAmelCase : Optional[Any] = 16
_lowerCAmelCase : Dict = (1, 384, 384)
_lowerCAmelCase : str = False
_lowerCAmelCase : int = """project"""
if "ade" in checkpoint_url:
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 768
_lowerCAmelCase : Dict = [1, 1, 1, 0.5]
_lowerCAmelCase : Any = 150
_lowerCAmelCase : str = 16
_lowerCAmelCase : Dict = """huggingface/label-files"""
_lowerCAmelCase : Optional[int] = """ade20k-id2label.json"""
_lowerCAmelCase : Optional[Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase ,_lowerCamelCase ,repo_type="""dataset""" ) ) ,"""r""" ) )
_lowerCAmelCase : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : List[Any] = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : List[Any] = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Union[str, Any]:
_lowerCAmelCase : Union[str, Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Dict:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowerCAmelCase : List[Any] = name.replace("""pretrained.model""" ,"""dpt.encoder""" )
if "pretrained.model" in name:
_lowerCAmelCase : int = name.replace("""pretrained.model""" ,"""dpt.embeddings""" )
if "patch_embed" in name:
_lowerCAmelCase : str = name.replace("""patch_embed""" ,"""""" )
if "pos_embed" in name:
_lowerCAmelCase : Optional[int] = name.replace("""pos_embed""" ,"""position_embeddings""" )
if "attn.proj" in name:
_lowerCAmelCase : int = name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "proj" in name and "project" not in name:
_lowerCAmelCase : int = name.replace("""proj""" ,"""projection""" )
if "blocks" in name:
_lowerCAmelCase : str = name.replace("""blocks""" ,"""layer""" )
if "mlp.fc1" in name:
_lowerCAmelCase : List[Any] = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
_lowerCAmelCase : int = name.replace("""mlp.fc2""" ,"""output.dense""" )
if "norm1" in name and "backbone" not in name:
_lowerCAmelCase : str = name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name and "backbone" not in name:
_lowerCAmelCase : int = name.replace("""norm2""" ,"""layernorm_after""" )
if "scratch.output_conv" in name:
_lowerCAmelCase : str = name.replace("""scratch.output_conv""" ,"""head""" )
if "scratch" in name:
_lowerCAmelCase : List[str] = name.replace("""scratch""" ,"""neck""" )
if "layer1_rn" in name:
_lowerCAmelCase : Dict = name.replace("""layer1_rn""" ,"""convs.0""" )
if "layer2_rn" in name:
_lowerCAmelCase : List[Any] = name.replace("""layer2_rn""" ,"""convs.1""" )
if "layer3_rn" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""layer3_rn""" ,"""convs.2""" )
if "layer4_rn" in name:
_lowerCAmelCase : List[str] = name.replace("""layer4_rn""" ,"""convs.3""" )
if "refinenet" in name:
_lowerCAmelCase : Optional[Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowerCAmelCase : List[str] = name.replace(f"refinenet{layer_idx}" ,f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""out_conv""" ,"""projection""" )
if "resConfUnit1" in name:
_lowerCAmelCase : Optional[int] = name.replace("""resConfUnit1""" ,"""residual_layer1""" )
if "resConfUnit2" in name:
_lowerCAmelCase : Optional[int] = name.replace("""resConfUnit2""" ,"""residual_layer2""" )
if "conv1" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""conv1""" ,"""convolution1""" )
if "conv2" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""conv2""" ,"""convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowerCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess1.0.project.0""" ,"""neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowerCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess2.0.project.0""" ,"""neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowerCAmelCase : List[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""" ,"""neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowerCAmelCase : int = name.replace("""pretrained.act_postprocess4.0.project.0""" ,"""neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowerCAmelCase : int = name.replace("""pretrained.act_postprocess1.3""" ,"""neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
_lowerCAmelCase : List[str] = name.replace("""pretrained.act_postprocess1.4""" ,"""neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
_lowerCAmelCase : List[str] = name.replace("""pretrained.act_postprocess2.3""" ,"""neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""pretrained.act_postprocess2.4""" ,"""neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
_lowerCAmelCase : Any = name.replace("""pretrained.act_postprocess3.3""" ,"""neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
_lowerCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess4.3""" ,"""neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
_lowerCAmelCase : Dict = name.replace("""pretrained.act_postprocess4.4""" ,"""neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
_lowerCAmelCase : List[Any] = name.replace("""pretrained""" ,"""dpt""" )
if "bn" in name:
_lowerCAmelCase : Tuple = name.replace("""bn""" ,"""batch_norm""" )
if "head" in name:
_lowerCAmelCase : Dict = name.replace("""head""" ,"""head.head""" )
if "encoder.norm" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""encoder.norm""" ,"""layernorm""" )
if "auxlayer" in name:
_lowerCAmelCase : Any = name.replace("""auxlayer""" ,"""auxiliary_head.head""" )
if "backbone" in name:
_lowerCAmelCase : int = name.replace("""backbone""" ,"""backbone.bit.encoder""" )
if ".." in name:
_lowerCAmelCase : Dict = name.replace("""..""" ,""".""" )
if "stem.conv" in name:
_lowerCAmelCase : Any = name.replace("""stem.conv""" ,"""bit.embedder.convolution""" )
if "blocks" in name:
_lowerCAmelCase : List[str] = name.replace("""blocks""" ,"""layers""" )
if "convolution" in name and "backbone" in name:
_lowerCAmelCase : str = name.replace("""convolution""" ,"""conv""" )
if "layer" in name and "backbone" in name:
_lowerCAmelCase : Any = name.replace("""layer""" ,"""layers""" )
if "backbone.bit.encoder.bit" in name:
_lowerCAmelCase : str = name.replace("""backbone.bit.encoder.bit""" ,"""backbone.bit""" )
if "embedder.conv" in name:
_lowerCAmelCase : List[str] = name.replace("""embedder.conv""" ,"""embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
_lowerCAmelCase : List[str] = name.replace("""backbone.bit.encoder.stem.norm""" ,"""backbone.bit.embedder.norm""" )
return name
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Optional[Any] ) -> List[Any]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : str = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
_lowerCAmelCase : Optional[int] = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Any = in_proj_weight[: config.hidden_size, :]
_lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Tuple ,_lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase , _lowerCAmelCase : Tuple = get_dpt_config(_lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowerCAmelCase : Optional[Any] = torch.load(_lowerCamelCase ,map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(_lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
_lowerCAmelCase : List[Any] = state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Tuple = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase ,_lowerCamelCase )
# load HuggingFace model
_lowerCAmelCase : List[str] = DPTForSemanticSegmentation(_lowerCamelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# Check outputs on an image
_lowerCAmelCase : Union[str, Any] = 480 if """ade""" in checkpoint_url else 384
_lowerCAmelCase : Optional[Any] = DPTImageProcessor(size=_lowerCamelCase )
_lowerCAmelCase : List[Any] = prepare_img()
_lowerCAmelCase : Optional[Any] = image_processor(_lowerCamelCase ,return_tensors="""pt""" )
# forward pass
_lowerCAmelCase : Optional[Any] = model(**_lowerCamelCase ).logits if """ade""" in checkpoint_url else model(**_lowerCamelCase ).predicted_depth
if show_prediction:
_lowerCAmelCase : Union[str, Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) ,size=(image.size[1], image.size[0]) ,mode="""bicubic""" ,align_corners=_lowerCamelCase ,)
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
_a : str = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 663 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 663 | 1 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ) -> bool:
if len(_lowerCamelCase ) == 0:
return False
_lowerCAmelCase : Tuple = len(_lowerCamelCase ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] ,_lowerCamelCase )
else:
return binary_search(a_list[midpoint + 1 :] ,_lowerCamelCase )
if __name__ == "__main__":
_a : List[str] = input('Enter numbers separated by comma:\n').strip()
_a : Any = [int(item.strip()) for item in user_input.split(',')]
_a : Union[str, Any] = int(input('Enter the number to be found in the list:\n').strip())
_a : int = '' if binary_search(sequence, target) else 'not '
print(F"""{target} was {not_str}found in {sequence}""")
| 663 |
"""simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> List[Any]:
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCAmelCase : List[str] = subprocess.run(_lowerCamelCase ,shell=_lowerCamelCase ,stdout=subprocess.PIPE )
_lowerCAmelCase : int = output.stdout.decode("""utf-8""" )
_lowerCAmelCase : Tuple = json.loads(_lowerCamelCase )
_lowerCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" ,"""w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 1 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_a : Tuple = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
_a : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Tuple = """https://pypi.org/pypi/diffusers/json"""
_lowerCAmelCase : Optional[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["""releases"""].keys()
return sorted(_lowerCamelCase ,key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( ) -> Dict:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_lowerCamelCase )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
_lowerCAmelCase : List[Any] = Path(_lowerCamelCase ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, os.PathLike] ) -> Any:
init_hf_modules()
_lowerCAmelCase : List[str] = Path(_lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
_lowerCAmelCase : str = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> int:
with open(_lowerCamelCase ,"""r""" ,encoding="""utf-8""" ) as f:
_lowerCAmelCase : List[Any] = f.read()
# Imports of the form `import .xxx`
_lowerCAmelCase : str = re.findall("""^\s*import\s+\.(\S+)\s*$""" ,_lowerCamelCase ,flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("""^\s*from\s+\.(\S+)\s+import""" ,_lowerCamelCase ,flags=re.MULTILINE )
# Unique-ify
return list(set(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Any:
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : List[str] = [module_file]
_lowerCAmelCase : List[str] = []
# Let's recurse through all relative imports
while not no_change:
_lowerCAmelCase : Tuple = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = Path(_lowerCamelCase ).parent
_lowerCAmelCase : Optional[int] = [str(module_path / m ) for m in new_imports]
_lowerCAmelCase : Tuple = [f for f in new_import_files if f not in all_relative_imports]
_lowerCAmelCase : Optional[int] = [f"{f}.py" for f in new_import_files]
_lowerCAmelCase : Tuple = len(_lowerCamelCase ) == 0
all_relative_imports.extend(_lowerCamelCase )
return all_relative_imports
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Optional[Any]:
with open(_lowerCamelCase ,"""r""" ,encoding="""utf-8""" ) as f:
_lowerCAmelCase : str = f.read()
# Imports of the form `import xxx`
_lowerCAmelCase : List[Any] = re.findall("""^\s*import\s+(\S+)\s*$""" ,_lowerCamelCase ,flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("""^\s*from\s+(\S+)\s+import""" ,_lowerCamelCase ,flags=re.MULTILINE )
# Only keep the top-level module
_lowerCAmelCase : List[Any] = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
_lowerCAmelCase : Optional[int] = list(set(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = []
for imp in imports:
try:
importlib.import_module(_lowerCamelCase )
except ImportError:
missing_packages.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
f"{', '.join(_lowerCamelCase )}. Run `pip install {' '.join(_lowerCamelCase )}`" )
return get_relative_imports(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[str] ) -> int:
_lowerCAmelCase : Any = module_path.replace(os.path.sep ,""".""" )
_lowerCAmelCase : List[Any] = importlib.import_module(_lowerCamelCase )
if class_name is None:
return find_pipeline_class(_lowerCamelCase )
return getattr(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> List[Any]:
from ..pipelines import DiffusionPipeline
_lowerCAmelCase : int = dict(inspect.getmembers(_lowerCamelCase ,inspect.isclass ) )
_lowerCAmelCase : Union[str, Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls ,_lowerCamelCase )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
f" {loaded_module}." )
_lowerCAmelCase : Union[str, Any] = cls
return pipeline_class
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, os.PathLike] ,_lowerCamelCase : str ,_lowerCamelCase : Optional[Union[str, os.PathLike]] = None ,_lowerCamelCase : bool = False ,_lowerCamelCase : bool = False ,_lowerCamelCase : Optional[Dict[str, str]] = None ,_lowerCamelCase : Optional[Union[bool, str]] = None ,_lowerCamelCase : Optional[str] = None ,_lowerCamelCase : bool = False ,) -> Union[str, Any]:
_lowerCAmelCase : int = str(_lowerCamelCase )
_lowerCAmelCase : List[Any] = os.path.join(_lowerCamelCase ,_lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
_lowerCAmelCase : Dict = module_file_or_url
_lowerCAmelCase : List[Any] = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
_lowerCAmelCase : List[str] = get_diffusers_versions()
# cut ".dev0"
_lowerCAmelCase : Optional[int] = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
_lowerCAmelCase : Any = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(f"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
_lowerCAmelCase : Dict = f"v{revision}"
elif revision == "main":
_lowerCAmelCase : Tuple = revision
else:
raise ValueError(
f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
f" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
_lowerCAmelCase : Union[str, Any] = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase ,pipeline=_lowerCamelCase )
try:
_lowerCAmelCase : str = cached_download(
_lowerCamelCase ,cache_dir=_lowerCamelCase ,force_download=_lowerCamelCase ,proxies=_lowerCamelCase ,resume_download=_lowerCamelCase ,local_files_only=_lowerCamelCase ,use_auth_token=_lowerCamelCase ,)
_lowerCAmelCase : int = """git"""
_lowerCAmelCase : Union[str, Any] = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
_lowerCAmelCase : int = hf_hub_download(
_lowerCamelCase ,_lowerCamelCase ,cache_dir=_lowerCamelCase ,force_download=_lowerCamelCase ,proxies=_lowerCamelCase ,resume_download=_lowerCamelCase ,local_files_only=_lowerCamelCase ,use_auth_token=_lowerCamelCase ,)
_lowerCAmelCase : Optional[int] = os.path.join("""local""" ,"""--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
_lowerCAmelCase : List[Any] = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
_lowerCAmelCase : int = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
_lowerCAmelCase : int = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase ,submodule_path / module_file )
for module_needed in modules_needed:
_lowerCAmelCase : Dict = f"{module_needed}.py"
shutil.copy(os.path.join(_lowerCamelCase ,_lowerCamelCase ) ,submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
_lowerCAmelCase : str = use_auth_token
elif use_auth_token is True:
_lowerCAmelCase : str = HfFolder.get_token()
else:
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = model_info(_lowerCamelCase ,revision=_lowerCamelCase ,token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
_lowerCAmelCase : Union[str, Any] = submodule_path / commit_hash
_lowerCAmelCase : Any = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase ,submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase ,f"{module_needed}.py" ,cache_dir=_lowerCamelCase ,force_download=_lowerCamelCase ,resume_download=_lowerCamelCase ,proxies=_lowerCamelCase ,use_auth_token=_lowerCamelCase ,revision=_lowerCamelCase ,local_files_only=_lowerCamelCase ,)
return os.path.join(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, os.PathLike] ,_lowerCamelCase : str ,_lowerCamelCase : Optional[str] = None ,_lowerCamelCase : Optional[Union[str, os.PathLike]] = None ,_lowerCamelCase : bool = False ,_lowerCamelCase : bool = False ,_lowerCamelCase : Optional[Dict[str, str]] = None ,_lowerCamelCase : Optional[Union[bool, str]] = None ,_lowerCamelCase : Optional[str] = None ,_lowerCamelCase : bool = False ,**_lowerCamelCase : List[str] ,) -> Dict:
_lowerCAmelCase : Optional[int] = get_cached_module_file(
_lowerCamelCase ,_lowerCamelCase ,cache_dir=_lowerCamelCase ,force_download=_lowerCamelCase ,resume_download=_lowerCamelCase ,proxies=_lowerCamelCase ,use_auth_token=_lowerCamelCase ,revision=_lowerCamelCase ,local_files_only=_lowerCamelCase ,)
return get_class_in_module(_lowerCamelCase ,final_module.replace(""".py""" ,"""""" ) )
| 663 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 1 |
"""simple docstring"""
from collections import defaultdict
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> bool:
_lowerCAmelCase : List[Any] = first_str.lower().strip()
_lowerCAmelCase : Union[str, Any] = second_str.lower().strip()
# Remove whitespace
_lowerCAmelCase : str = first_str.replace(""" """ ,"""""" )
_lowerCAmelCase : Any = second_str.replace(""" """ ,"""""" )
# Strings of different lengths are not anagrams
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
return False
# Default values for count should be 0
_lowerCAmelCase : defaultdict[str, int] = defaultdict(_lowerCamelCase )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(_lowerCamelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
_a : Union[str, Any] = input('Enter the first string ').strip()
_a : Union[str, Any] = input('Enter the second string ').strip()
_a : List[str] = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 663 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000000 ) -> int:
_lowerCAmelCase : List[str] = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,_lowerCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
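# Small illustrative check: phi(2), ..., phi(8) = 1, 2, 2, 4, 2, 6, 4, which sums
# to 21, so the sieve above should give solution(8) == 21.
assert solution(8) == 21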
| 663 | 1 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_a : str = None
try:
import msvcrt
except ImportError:
_a : List[str] = None
try:
import fcntl
except ImportError:
_a : int = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
_a : int = OSError
# Data
# ------------------------------------------------
_a : Dict = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
_a : Union[str, Any] = '3.0.12'
_a : Optional[int] = None
def SCREAMING_SNAKE_CASE ( ) -> str:
global _logger
_lowerCAmelCase : Tuple = _logger or logging.getLogger(__name__ )
return _logger
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
_lowerCAmelCase : str = lock_file
return None
def __str__( self ):
_lowerCAmelCase : List[str] = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class __A :
def __init__( self , a__ ):
_lowerCAmelCase : List[str] = lock
return None
def __enter__( self ):
return self.lock
def __exit__( self , a__ , a__ , a__ ):
self.lock.release()
return None
class __A :
def __init__( self , a__ , a__=-1 , a__=None ):
_lowerCAmelCase : Optional[Any] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
_lowerCAmelCase : Any = self.hash_filename_if_too_long(a__ , a__ )
# The path to the lock file.
_lowerCAmelCase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_lowerCAmelCase : str = None
# The default timeout value.
_lowerCAmelCase : Tuple = timeout
# We use this lock primarily for the lock counter.
_lowerCAmelCase : List[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_lowerCAmelCase : Any = 0
return None
@property
def __A ( self ):
return self._lock_file
@property
def __A ( self ):
return self._timeout
@timeout.setter
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = float(a__ )
return None
def __A ( self ):
raise NotImplementedError()
def __A ( self ):
raise NotImplementedError()
@property
def __A ( self ):
return self._lock_file_fd is not None
def __A ( self , a__=None , a__=0.0_5 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_lowerCAmelCase : Tuple = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_lowerCAmelCase : int = id(self )
_lowerCAmelCase : Optional[Any] = self._lock_file
_lowerCAmelCase : Dict = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(a__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_lowerCAmelCase : str = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __A ( self , a__=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_lowerCAmelCase : List[str] = id(self )
_lowerCAmelCase : Dict = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
_lowerCAmelCase : Optional[int] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self ):
self.acquire()
return self
def __exit__( self , a__ , a__ , a__ ):
self.release()
return None
def __del__( self ):
self.release(force=a__ )
return None
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = os.path.basename(a__ )
if len(a__ ) > max_length and max_length > 0:
_lowerCAmelCase : Any = os.path.dirname(a__ )
_lowerCAmelCase : Dict = str(hash(a__ ) )
_lowerCAmelCase : Dict = filename[: max_length - len(a__ ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(a__ , a__ )
else:
return path
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=-1 , a__=None ):
from .file_utils import relative_to_absolute_path
super().__init__(a__ , timeout=a__ , max_filename_length=a__ )
_lowerCAmelCase : Union[str, Any] = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def __A ( self ):
_lowerCAmelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_lowerCAmelCase : Union[str, Any] = os.open(self._lock_file , a__ )
except OSError:
pass
else:
try:
msvcrt.locking(a__ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(a__ )
else:
_lowerCAmelCase : str = fd
return None
def __A ( self ):
_lowerCAmelCase : Tuple = self._lock_file_fd
_lowerCAmelCase : List[str] = None
msvcrt.locking(a__ , msvcrt.LK_UNLCK , 1 )
os.close(a__ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=-1 , a__=None ):
_lowerCAmelCase : Tuple = os.statvfs(os.path.dirname(a__ ) ).f_namemax
super().__init__(a__ , timeout=a__ , max_filename_length=a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_lowerCAmelCase : str = os.open(self._lock_file , a__ )
try:
fcntl.flock(a__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(a__ )
else:
_lowerCAmelCase : Any = fd
return None
def __A ( self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_lowerCAmelCase : Union[str, Any] = self._lock_file_fd
_lowerCAmelCase : Dict = None
fcntl.flock(a__ , fcntl.LOCK_UN )
os.close(a__ )
return None
class __A ( SCREAMING_SNAKE_CASE_ ):
def __A ( self ):
_lowerCAmelCase : int = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_lowerCAmelCase : Any = os.open(self._lock_file , a__ )
except OSError:
pass
else:
_lowerCAmelCase : Any = fd
return None
def __A ( self ):
os.close(self._lock_file_fd )
_lowerCAmelCase : List[str] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
_a : Optional[int] = None
if msvcrt:
_a : List[str] = WindowsFileLock
elif fcntl:
_a : Dict = UnixFileLock
else:
_a : int = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
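# A minimal usage sketch of the FileLock alias chosen above. This assumes the
# canonical acquire/release context-manager API of the upstream filelock module
# that this vendored copy mirrors; the file names are illustrative only.
example_lock = FileLock("example.txt.lock")
with example_lock:
    with open("example.txt", "a") as f:
        f.write("guarded write\n")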
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
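# Illustrative use of the lazy module above: the names listed in the import
# structure become importable from `transformers` and resolve on first access
# (sketch; builds a randomly initialised model from the default config).
from transformers import WavLMConfig, WavLMModel

configuration = WavLMConfig()
model = WavLMModel(configuration)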
| 663 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : Optional[Any] = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
_lowerCAmelCase : List[Any] = Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> str:
_lowerCAmelCase : int = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> List[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Dict = val
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : List[str] ) -> str:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : int = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
_lowerCAmelCase : List[Any] = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
_lowerCAmelCase : Any = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase ,requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : Optional[Any] = qkv_bias
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> List[str]:
_lowerCAmelCase : int = 364 if """coco""" in model_name else 224
_lowerCAmelCase : int = InstructBlipVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xl""" ,dense_act_fn="""gelu""" ,bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : Optional[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" ,dense_act_fn="""gelu""" ,bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_lowerCAmelCase : Dict = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" ,vocab_size=32001 ).to_dict()
elif "vicuna-13b" in model_name:
_lowerCAmelCase : Tuple = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" ,vocab_size=32001 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_lowerCAmelCase : List[Any] = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
_lowerCAmelCase : Optional[Any] = InstructBlipConfig(vision_config=_lowerCamelCase ,text_config=_lowerCamelCase ,qformer_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : List[Any]=None ,_lowerCamelCase : Any=False ) -> Any:
_lowerCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-uncased""" ,truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
_lowerCAmelCase : str = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" ,truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_lowerCAmelCase : Any = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" ,truncation_side="""left""" ,bos_token="""</s>""" ,unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
_lowerCAmelCase , _lowerCAmelCase : Dict = get_blipa_config(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = InstructBlipForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : List[Any] = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Union[str, Any] = """cuda:1""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase : Optional[int] = """cuda:2""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = load_model_and_preprocess(
name=_lowerCamelCase ,model_type=_lowerCamelCase ,is_eval=_lowerCamelCase ,device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[str] = original_model.state_dict()
_lowerCAmelCase : Any = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : int = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : Tuple = key.replace("""Qformer.bert""" ,"""qformer""" )
if "attention.self" in key:
_lowerCAmelCase : int = key.replace("""self""" ,"""attention""" )
if "llm_proj" in key:
_lowerCAmelCase : Dict = key.replace("""llm_proj""" ,"""language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Dict = key.replace("""t5_proj""" ,"""language_projection""" )
if key.startswith("""llm_model""" ):
_lowerCAmelCase : str = key.replace("""llm_model""" ,"""language_model""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : Optional[Any] = key.replace("""t5""" ,"""language""" )
_lowerCAmelCase : Dict = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase ,_lowerCamelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(_lowerCamelCase ,strict=_lowerCamelCase )
_lowerCAmelCase : int = load_demo_image()
_lowerCAmelCase : Dict = """What is unusual about this image?"""
# create processor
_lowerCAmelCase : int = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} ,image_mean=_lowerCamelCase ,image_std=_lowerCamelCase )
_lowerCAmelCase : List[str] = InstructBlipProcessor(
image_processor=_lowerCamelCase ,tokenizer=_lowerCamelCase ,qformer_tokenizer=_lowerCamelCase ,)
_lowerCAmelCase : Optional[Any] = processor(images=_lowerCamelCase ,text=_lowerCamelCase ,return_tensors="""pt""" ).to(_lowerCamelCase )
# make sure processor creates exact same pixel values
_lowerCAmelCase : Any = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) ,_lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "vicuna" in model_name:
_lowerCAmelCase : Dict = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
_lowerCAmelCase : str = hf_model(**_lowerCamelCase ).logits
else:
_lowerCAmelCase : Dict = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Union[str, Any] = tokenizer("""\n""" ,return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id ,-100 )
_lowerCAmelCase : str = hf_model(**_lowerCamelCase ,labels=_lowerCamelCase ).logits
print("""First values of original logits:""" ,original_logits[0, :3, :3] )
print("""First values of HF logits:""" ,logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_lowerCAmelCase : Optional[Any] = 1e-4 if """vicuna""" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) ,_lowerCamelCase ,atol=_lowerCamelCase )
print("""Looks ok!""" )
print("""Generating with original model...""" )
_lowerCAmelCase : str = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} ,num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
_lowerCAmelCase : int = hf_model.generate(
**_lowerCamelCase ,do_sample=_lowerCamelCase ,num_beams=5 ,max_length=256 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.5 ,length_penalty=1.0 ,temperature=1 ,)
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_lowerCAmelCase : List[str] = 2
print("""Original generation:""" ,_lowerCamelCase )
_lowerCAmelCase : List[str] = processor.batch_decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = [text.strip() for text in output_text]
print("""HF generation:""" ,_lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f"Salesforce/{model_name}" )
hf_model.push_to_hub(f"Salesforce/{model_name}" )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
_a : str = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
_a : Any = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 663 |
"""simple docstring"""
from PIL import Image
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ,_lowerCamelCase : int ) -> Image:
_lowerCAmelCase : Any = (259 * (level + 255)) / (255 * (259 - level))
def contrast(_lowerCamelCase : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(_lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
_a : str = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 1 |
"""simple docstring"""
class OverFlowError ( SCREAMING_SNAKE_CASE_ ):
    pass
class UnderFlowError ( SCREAMING_SNAKE_CASE_ ):
    pass
class __A :
def __init__( self ):
_lowerCAmelCase : Union[str, Any] = [
[],
[],
[],
]
def __A ( self , a__ , a__ ):
try:
if len(self.queues[priority] ) >= 100:
                raise OverFlowError("""Maximum queue size is 100""" )
self.queues[priority].append(a__ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __A ( self ):
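        # Scan priorities 0 -> 2 and pop FIFO within the first non-empty level.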
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class __A :
def __init__( self ):
_lowerCAmelCase : int = []
def __A ( self , a__ ):
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(a__ )
def __A ( self ):
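        # The smallest element counts as the highest priority and is removed first.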
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
_lowerCAmelCase : int = min(self.queue )
self.queue.remove(a__ )
return data
def __str__( self ):
return str(self.queue )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 |
"""simple docstring"""
class OverFlowError ( SCREAMING_SNAKE_CASE_ ):
    pass
class UnderFlowError ( SCREAMING_SNAKE_CASE_ ):
    pass
class __A :
def __init__( self ):
_lowerCAmelCase : Union[str, Any] = [
[],
[],
[],
]
def __A ( self , a__ , a__ ):
try:
if len(self.queues[priority] ) >= 100:
                raise OverFlowError("""Maximum queue size is 100""" )
self.queues[priority].append(a__ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __A ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class __A :
def __init__( self ):
_lowerCAmelCase : int = []
def __A ( self , a__ ):
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(a__ )
def __A ( self ):
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
_lowerCAmelCase : int = min(self.queue )
self.queue.remove(a__ )
return data
def __str__( self ):
return str(self.queue )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a : int = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = "vivit"
def __init__( self , a__=224 , a__=32 , a__=[2, 16, 16] , a__=3 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu_fast" , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=1e-06 , a__=True , **a__ , ):
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : Union[str, Any] = num_frames
_lowerCAmelCase : Optional[Any] = tubelet_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : int = qkv_bias
super().__init__(**a__ )
| 663 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
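    # Downscale a pixel size to the latent grid: e.g. 512 with scale_factor=8 gives
    # 512 // 8**2 = 8 cells, returned as 8 * 8 = 64; non-multiples round up by one cell.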
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
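        # e.g. the default strength=0.3 with 100 inference steps keeps only the final 30
        # timesteps, so the encoded image is only partially re-noised before denoising.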
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
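        # If `image` is already a 4-channel latent it is used as-is; otherwise it is encoded
        # with movq and noised at the starting timestep chosen by `strength`.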
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 1 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __A ( unittest.TestCase ):
def __init__( self , a__ ):
_lowerCAmelCase : int = parent
def __A ( self ):
return {}
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
_lowerCAmelCase : List[Any] = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
_lowerCAmelCase : str = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_a, html_string_a]
@require_bsa
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = MarkupLMFeatureExtractor if is_bsa_available() else None
def __A ( self ):
_lowerCAmelCase : Tuple = MarkupLMFeatureExtractionTester(self )
@property
def __A ( self ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def __A ( self ):
# Initialize feature_extractor
_lowerCAmelCase : List[Any] = self.feature_extraction_class()
# Test not batched input
_lowerCAmelCase : Union[str, Any] = get_html_strings()[0]
_lowerCAmelCase : Any = feature_extractor(a__ )
# fmt: off
_lowerCAmelCase : Tuple = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
_lowerCAmelCase : Optional[int] = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
# fmt: on
self.assertEqual(encoding.nodes , a__ )
self.assertEqual(encoding.xpaths , a__ )
# Test batched
_lowerCAmelCase : Optional[Any] = get_html_strings()
_lowerCAmelCase : Any = feature_extractor(a__ )
# fmt: off
_lowerCAmelCase : Union[str, Any] = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
_lowerCAmelCase : int = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , a__ )
self.assertEqual(encoding.xpaths , a__ )
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __A ( unittest.TestCase ):
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.dummy_uncond_unet
_lowerCAmelCase : List[str] = DDIMScheduler()
_lowerCAmelCase : List[Any] = self.dummy_vq_model
_lowerCAmelCase : Tuple = LDMPipeline(unet=a__ , vqvae=a__ , scheduler=a__ )
ldm.to(a__ )
ldm.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : int = torch.manual_seed(0 )
_lowerCAmelCase : str = ldm(generator=a__ , num_inference_steps=2 , output_type="""numpy""" ).images
_lowerCAmelCase : List[Any] = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = ldm(generator=a__ , num_inference_steps=2 , output_type="""numpy""" , return_dict=a__ )[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
_lowerCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : int = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_lowerCAmelCase : Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(a__ )
ldm.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : int = ldm(generator=a__ , num_inference_steps=5 , output_type="""numpy""" ).images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : List[Any] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_lowerCAmelCase : Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 663 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool:
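    # e.g. perfect(6) and perfect(28) are True (1 + 2 + 3 == 6, 1 + 2 + 4 + 7 + 14 == 28),
    # while perfect(27) is False.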
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a : int = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : List[str] = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_a : Union[str, Any] = ''
_a : Optional[Any] = ''
_a : List[str] = ''
_a : Optional[Any] = 1 # (0 is vertical, 1 is horizontal)
def SCREAMING_SNAKE_CASE ( ) -> None:
_lowerCAmelCase , _lowerCAmelCase : int = get_dataset(_lowerCamelCase ,_lowerCamelCase )
print("""Processing...""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = update_image_and_anno(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for index, image in enumerate(_lowerCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase : Tuple = random_chars(32 )
_lowerCAmelCase : Union[str, Any] = paths[index].split(os.sep )[-1].rsplit(""".""" ,1 )[0]
_lowerCAmelCase : List[str] = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(f"/{file_root}.jpg" ,_lowerCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"Success {index+1}/{len(_lowerCamelCase )} with {file_name}" )
_lowerCAmelCase : Tuple = []
for anno in new_annos[index]:
_lowerCAmelCase : str = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(_lowerCamelCase )
with open(f"/{file_root}.txt" ,"""w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> tuple[list, list]:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Optional[Any] = []
for label_file in glob.glob(os.path.join(_lowerCamelCase ,"""*.txt""" ) ):
_lowerCAmelCase : Optional[int] = label_file.split(os.sep )[-1].rsplit(""".""" ,1 )[0]
with open(_lowerCamelCase ) as in_file:
_lowerCAmelCase : Union[str, Any] = in_file.readlines()
_lowerCAmelCase : str = os.path.join(_lowerCamelCase ,f"{label_name}.jpg" )
_lowerCAmelCase : Dict = []
for obj_list in obj_lists:
_lowerCAmelCase : Dict = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_lowerCamelCase )
labels.append(_lowerCamelCase )
return img_paths, labels
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ,_lowerCamelCase : list ,_lowerCamelCase : int = 1 ) -> tuple[list, list, list]:
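    # For each image, apply cva.flip and mirror the YOLO-style bbox centres: a horizontal
    # flip (flip_type=1) sets x_center' = 1 - x_center, a vertical flip mirrors y_center.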
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : str = []
_lowerCAmelCase : Tuple = []
for idx in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Dict = img_list[idx]
path_list.append(_lowerCamelCase )
_lowerCAmelCase : Any = anno_list[idx]
_lowerCAmelCase : Any = cva.imread(_lowerCamelCase )
if flip_type == 1:
_lowerCAmelCase : List[Any] = cva.flip(_lowerCamelCase ,_lowerCamelCase )
for bbox in img_annos:
_lowerCAmelCase : Union[str, Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase : str = cva.flip(_lowerCamelCase ,_lowerCamelCase )
for bbox in img_annos:
_lowerCAmelCase : Optional[Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_lowerCamelCase )
new_imgs_list.append(_lowerCamelCase )
return new_imgs_list, new_annos_lists, path_list
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 32 ) -> str:
assert number_char > 1, "The number of character should greater than 1"
_lowerCAmelCase : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 663 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool:
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a : int = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 | 1 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
_a : Union[str, Any] = namedtuple('covid_data', 'cases deaths recovered')
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
_lowerCAmelCase : Dict = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(_lowerCamelCase ).content ).xpath(_lowerCamelCase ) )
_a : List[Any] = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 663 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __A :
_UpperCamelCase : int
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def SCREAMING_SNAKE_CASE ( ) -> Node | None:
    # Build the sample tree returned as ``tree`` (shape assumed: 2 and 3 under the root 1,
    # 4 and 5 under 2):
    #        1
    #       / \
    #      2   3
    #     / \
    #    4   5
    _lowerCAmelCase : Tuple = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> int:
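    # Height counts nodes on the longest root-to-leaf path: a single node has height 1,
    # an empty tree has height 0.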
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
if root is None:
return output
_lowerCAmelCase : Union[str, Any] = deque([root] )
while process_queue:
_lowerCAmelCase : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
_lowerCAmelCase : list[Sequence[Node | None]] = []
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = height(_lowerCamelCase )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Optional[int] = 0
return output
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
_lowerCAmelCase : int = make_tree()
print(f"In-order Traversal: {inorder(_lowerCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_lowerCamelCase )}" )
print(f"Post-order Traversal: {postorder(_lowerCamelCase )}" ,"""\n""" )
print(f"Height of Tree: {height(_lowerCamelCase )}" ,"""\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) ,"""\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 ,height(_lowerCamelCase ) + 1 ):
print(f"Level {level}:" ,get_nodes_from_left_to_right(_lowerCamelCase ,level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 663 | 1 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
_a : Optional[Any] = getLogger(__name__)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : str ,_lowerCamelCase : str ,_lowerCamelCase : int = 8 ,_lowerCamelCase : int = 1024 ,_lowerCamelCase : Optional[Any]="val" ,_lowerCamelCase : List[str]=None ,_lowerCamelCase : List[str]=False ,_lowerCamelCase : Tuple="summarization" ,_lowerCamelCase : Dict=None ,_lowerCamelCase : int=1 ,_lowerCamelCase : Dict = None ,_lowerCamelCase : str="" ,**_lowerCamelCase : Optional[Any] ,) -> Dict:
_lowerCAmelCase : List[str] = str(_lowerCamelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""" ,rank=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = Path(_lowerCamelCase )
_lowerCAmelCase : List[Any] = save_dir.joinpath(f"rank_{local_rank}_output.json" )
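    # Every distributed rank writes its own generations to rank_<local_rank>_output.json under save_dir.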
torch.cuda.set_device(_lowerCamelCase )
_lowerCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase ).cuda()
if fpaa:
_lowerCAmelCase : Tuple = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_lowerCamelCase ,_lowerCamelCase ) # update config with task specific params
_lowerCAmelCase : List[Any] = generate_kwargs.pop("""num_beams""" ,model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_lowerCAmelCase : Optional[int] = num_return_sequences
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(_lowerCamelCase )
logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
_lowerCAmelCase : List[str] = tokenizer.model_max_length
if prefix is None:
_lowerCAmelCase : Union[str, Any] = prefix or getattr(model.config ,"""prefix""" ,"""""" ) or """"""
_lowerCAmelCase : Union[str, Any] = SeqaSeqDataset(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,max_target_length=1024 ,type_path=_lowerCamelCase ,n_obs=_lowerCamelCase ,prefix=_lowerCamelCase ,**_lowerCamelCase ,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_lowerCAmelCase : List[str] = ds.make_sortish_sampler(_lowerCamelCase ,distributed=_lowerCamelCase ,add_extra_examples=_lowerCamelCase ,shuffle=_lowerCamelCase )
_lowerCAmelCase : Any = DataLoader(_lowerCamelCase ,sampler=_lowerCamelCase ,batch_size=_lowerCamelCase ,collate_fn=ds.collate_fn )
_lowerCAmelCase : int = []
for batch in tqdm(_lowerCamelCase ):
_lowerCAmelCase : str = model.generate(
input_ids=batch["""input_ids"""].to(model.device ) ,attention_mask=batch["""attention_mask"""].to(model.device ) ,num_return_sequences=_lowerCamelCase ,num_beams=_lowerCamelCase ,**_lowerCamelCase ,)
_lowerCAmelCase : str = tokenizer.batch_decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase ,clean_up_tokenization_spaces=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = batch["""ids"""]
if num_return_sequences > 1:
_lowerCAmelCase : List[Any] = chunks(_lowerCamelCase ,_lowerCamelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_lowerCamelCase ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(_lowerCamelCase ,_lowerCamelCase )
return results, sampler.num_replicas
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : List[Any] = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""" ,type=_lowerCamelCase ,help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""" ,type=_lowerCamelCase ,help="""like facebook/bart-large-cnn,t5-base, etc.""" ,default="""sshleifer/distilbart-xsum-12-3""" ,)
parser.add_argument("""--save_dir""" ,type=_lowerCamelCase ,help="""where to save""" ,default="""tmp_gen""" )
parser.add_argument("""--max_source_length""" ,type=_lowerCamelCase ,default=_lowerCamelCase )
parser.add_argument(
"""--type_path""" ,type=_lowerCamelCase ,default="""test""" ,help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""" ,type=_lowerCamelCase ,default="""summarization""" ,help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" ,type=_lowerCamelCase ,default=8 ,required=_lowerCamelCase ,help="""batch size""" )
parser.add_argument(
"""--local_rank""" ,type=_lowerCamelCase ,default=-1 ,required=_lowerCamelCase ,help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,required=_lowerCamelCase ,help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""" ,type=_lowerCamelCase ,default=1 ,required=_lowerCamelCase ,help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""" ,type=_lowerCamelCase ,default=600 ,required=_lowerCamelCase ,help="""How long should master process wait for other processes to finish.""" ,)
parser.add_argument("""--src_lang""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,required=_lowerCamelCase )
parser.add_argument("""--tgt_lang""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,required=_lowerCamelCase )
parser.add_argument(
"""--prefix""" ,type=_lowerCamelCase ,required=_lowerCamelCase ,default=_lowerCamelCase ,help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""" ,action="""store_true""" )
parser.add_argument("""--debug""" ,action="""store_true""" )
_lowerCAmelCase : List[Any] = time.time()
_lowerCAmelCase , _lowerCAmelCase : str = parser.parse_known_args()
_lowerCAmelCase : Union[str, Any] = parse_numeric_n_bool_cl_kwargs(_lowerCamelCase )
if generate_kwargs and args.local_rank <= 0:
print(f"parsed the following generate kwargs: {generate_kwargs}" )
_lowerCAmelCase : str = Path(args.save_dir + """_tmp""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) # this handles locking.
_lowerCAmelCase : Any = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
_lowerCAmelCase : Union[str, Any] = {}
if args.src_lang is not None:
_lowerCAmelCase : Any = args.src_lang
if args.tgt_lang is not None:
_lowerCAmelCase : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = eval_data_dir(
args.data_dir ,_lowerCamelCase ,args.model_name ,type_path=args.type_path ,bs=args.bs ,fpaa=args.fpaa ,task=args.task ,local_rank=args.local_rank ,n_obs=args.n_obs ,max_source_length=args.max_source_length ,num_return_sequences=args.num_return_sequences ,prefix=args.prefix ,dataset_kwargs=_lowerCamelCase ,**_lowerCamelCase ,)
if args.local_rank <= 0:
_lowerCAmelCase : Optional[int] = Path(args.save_dir )
save_dir.mkdir(exist_ok=_lowerCamelCase )
_lowerCAmelCase : List[str] = gather_results_from_each_node(_lowerCamelCase ,_lowerCamelCase ,args.sync_timeout )
_lowerCAmelCase : List[Any] = combine_partial_results(_lowerCamelCase )
if args.num_return_sequences > 1:
_lowerCAmelCase : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" )
print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(_lowerCamelCase ,_lowerCamelCase )
return
_lowerCAmelCase : Union[str, Any] = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : List[Any] = [x.rstrip() for x in f.readlines()][: len(_lowerCamelCase )]
# Calculate metrics, save metrics, and save _generations.txt
_lowerCAmelCase : List[str] = """translation""" in args.task
_lowerCAmelCase : List[Any] = calculate_bleu if calc_bleu else calculate_rouge
_lowerCAmelCase : str = """bleu""" if calc_bleu else """rouge"""
_lowerCAmelCase : Dict = score_fn(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Any = len(_lowerCamelCase )
_lowerCAmelCase : int = time.time() - start_time
_lowerCAmelCase : Optional[Any] = round(runtime / metrics["""n_obs"""] ,4 )
_lowerCAmelCase : Any = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_lowerCAmelCase : Union[str, Any] = save_dir.joinpath(f"{args.type_path}_{metric_name}.json" )
save_json(_lowerCamelCase ,_lowerCamelCase ,indent=_lowerCamelCase )
print(_lowerCamelCase )
write_txt_file(_lowerCamelCase ,save_dir.joinpath(f"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(_lowerCamelCase ,save_dir.joinpath(f"{args.type_path}.target" ) )
else:
shutil.rmtree(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> List:
_lowerCAmelCase : Union[str, Any] = []
for partial_result in partial_results:
records.extend(_lowerCamelCase )
_lowerCAmelCase : Any = sorted(_lowerCamelCase ,key=lambda _lowerCamelCase : x["id"] )
_lowerCAmelCase : str = [x["""pred"""] for x in records]
return preds
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[str] ,_lowerCamelCase : List[str] ) -> List[Dict[str, List]]:
# WAIT FOR lots of .json files
_lowerCAmelCase : Optional[int] = time.time()
logger.info("""waiting for all nodes to finish""" )
_lowerCAmelCase : str = None
while (time.time() - start_wait) < timeout:
_lowerCAmelCase : List[Any] = list(save_dir.glob("""rank_*.json""" ) )
if len(_lowerCamelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
_lowerCAmelCase : Any = lmap(_lowerCamelCase ,_lowerCamelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
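# Hedged usage sketch (not part of the original script): --local_rank is documented above as
# "should be passed by distributed.launch", so a launch command could look roughly like
#     python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#         --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir xsum_generations --bs 8 --fp16
# The script filename and the concrete directory names are assumptions for illustration only;
# the flags themselves are the ones defined in run_generate() above.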
| 663 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 663 | 1 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
        ##Now let's construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
        _lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )  # tf.sub was removed in TF 1.x; tf.subtract is the supported op
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
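# Hedged usage sketch (not part of the original file): the routine above takes a list of equal-length
# vectors plus the number of clusters and returns (centroids, assignments). The call below is
# illustrative only -- in this file the routine is bound to the generic name SCREAMING_SNAKE_CASE,
# so a real caller would import it under whatever name the module actually exposes.
#
#     sample_vectors = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.1], [7.9, 8.3]]
#     centroids, assignments = SCREAMING_SNAKE_CASE(sample_vectors, 2)
#     print(centroids)    # one centroid vector per cluster
#     print(assignments)  # cluster index per input vector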
| 663 | 1 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str]=1024 ,_lowerCamelCase : Union[str, Any]=1024 ,_lowerCamelCase : str=False ,**_lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[Any] = SeqaSeqDataset(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,type_path="""train""" ,**_lowerCamelCase )
_lowerCAmelCase : List[Any] = tok.pad_token_id
def get_lens(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Any = tqdm(
DataLoader(_lowerCamelCase ,batch_size=512 ,num_workers=8 ,shuffle=_lowerCamelCase ,collate_fn=ds.collate_fn ) ,desc=str(ds.len_file ) ,)
_lowerCAmelCase : Dict = []
for batch in dl:
_lowerCAmelCase : List[str] = batch["""input_ids"""].ne(_lowerCamelCase ).sum(1 ).tolist()
_lowerCAmelCase : Optional[Any] = batch["""labels"""].ne(_lowerCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(_lowerCamelCase ,_lowerCamelCase ):
max_lens.append(max(_lowerCamelCase ,_lowerCamelCase ) )
else:
max_lens.extend(_lowerCamelCase )
return max_lens
_lowerCAmelCase : Optional[Any] = get_lens(_lowerCamelCase )
_lowerCAmelCase : int = SeqaSeqDataset(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,type_path="""val""" ,**_lowerCamelCase )
_lowerCAmelCase : int = get_lens(_lowerCamelCase )
pickle_save(_lowerCamelCase ,train_ds.len_file )
pickle_save(_lowerCamelCase ,val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
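# Hedged usage sketch (assumption, not from the original file): fire.Fire is meant to expose the
# function above as a CLI entry point whose first positional argument is a tokenizer name or path
# (handed to AutoTokenizer.from_pretrained) and whose second is the dataset directory for
# SeqaSeqDataset, e.g. roughly:
#     python save_len_file.py sshleifer/distilbart-xsum-12-3 cnn_dm
# The script filename and the example values are illustrative only.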
| 663 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : Dict = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = ['ConvNextFeatureExtractor']
_a : Any = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 663 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Any = BertTokenizer
_UpperCamelCase : str = BertTokenizerFast
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Optional[Any] = filter_non_english
def __A ( self ):
super().setUp()
_lowerCAmelCase : Tuple = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = """UNwant\u00E9d,running"""
_lowerCAmelCase : Dict = """unwanted, running"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : Any = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase : Optional[int] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(a__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [9, 6, 7, 12, 10, 11] )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
_lowerCAmelCase : List[str] = self.get_rust_tokenizer()
_lowerCAmelCase : List[str] = """UNwant\u00E9d,running"""
_lowerCAmelCase : Tuple = tokenizer.tokenize(a__ )
_lowerCAmelCase : Optional[int] = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : List[str] = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Any = self.get_rust_tokenizer()
_lowerCAmelCase : List[Any] = tokenizer.encode(a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# With lower casing
_lowerCAmelCase : Optional[int] = self.get_tokenizer(do_lower_case=a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer(do_lower_case=a__ )
_lowerCAmelCase : Tuple = """UNwant\u00E9d,running"""
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : str = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Any = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Dict = self.get_rust_tokenizer()
_lowerCAmelCase : List[Any] = tokenizer.encode(a__ )
_lowerCAmelCase : int = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __A ( self ):
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __A ( self ):
_lowerCAmelCase : Any = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __A ( self ):
_lowerCAmelCase : List[Any] = BasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __A ( self ):
_lowerCAmelCase : str = BasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __A ( self ):
_lowerCAmelCase : Any = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __A ( self ):
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=a__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __A ( self ):
_lowerCAmelCase : List[Any] = BasicTokenizer()
_lowerCAmelCase : int = """a\n'll !!to?'d of, can't."""
_lowerCAmelCase : Tuple = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_lowerCAmelCase : Any = {}
for i, token in enumerate(a__ ):
_lowerCAmelCase : Dict = i
_lowerCAmelCase : Optional[Any] = WordpieceTokenizer(vocab=a__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __A ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __A ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __A ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __A ( self ):
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(a__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(a__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def __A ( self ):
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
_lowerCAmelCase : int = tokenizer.encode("""sequence builders""" , add_special_tokens=a__ )
_lowerCAmelCase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a__ )
_lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __A ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
_lowerCAmelCase : Any = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_lowerCAmelCase : Dict = tokenizer_r.encode_plus(
a__ , return_attention_mask=a__ , return_token_type_ids=a__ , return_offsets_mapping=a__ , add_special_tokens=a__ , )
_lowerCAmelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(a__ , """do_lower_case""" ) else False
_lowerCAmelCase : int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __A ( self ):
_lowerCAmelCase : int = ["""的""", """人""", """有"""]
_lowerCAmelCase : Optional[Any] = """""".join(a__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(a__ , **a__ )
_lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
_lowerCAmelCase : Any = tokenizer_p.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : List[str] = tokenizer_r.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(a__ )
_lowerCAmelCase : List[str] = tokenizer_p.convert_ids_to_tokens(a__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : str = False
_lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained(a__ , **a__ )
_lowerCAmelCase : Tuple = tokenizer_r.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : Optional[Any] = tokenizer_p.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = tokenizer_r.convert_ids_to_tokens(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(a__ )
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCAmelCase : List[Any] = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(a__ )
]
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
| 663 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
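# Per the deprecation message above, the supported import is:
#     from diffusers import StableDiffusionInpaintPipeline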
| 663 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
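# Worked example (illustrative): with the default scale_factor of 8, a requested size of 768 x 768
# gives 768 // 8**2 = 12 per side, so the helper returns (12 * 8, 12 * 8) = (96, 96). A size that is
# not a multiple of 64, e.g. 770, rounds up: 770 // 64 = 12 with a remainder, hence 13 * 8 = 104.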
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
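    # Worked example (illustrative): with num_inference_steps=100 and strength=0.3, the method above
    # yields init_timestep = min(int(100 * 0.3), 100) = 30 and t_start = max(100 - 30, 0) = 70, so
    # only the last 30 scheduler timesteps are kept -- the img2img pass denoises for roughly
    # `strength` of the full schedule.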
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=128 , a__=32 , a__=16 , a__=2 , a__=0.0_2 , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : Optional[int] = seq_length
_lowerCAmelCase : Tuple = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : Dict = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = max_position_embeddings
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Any = num_labels
_lowerCAmelCase : Union[str, Any] = num_choices
_lowerCAmelCase : List[str] = scope
def __A ( self ):
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_input_mask:
_lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : List[Any] = None
if self.use_token_type_ids:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Any = None
_lowerCAmelCase : str = None
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
def __A ( self ):
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Optional[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase : int = True
_lowerCAmelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = NezhaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = model(a__ , attention_mask=a__ , token_type_ids=a__ )
_lowerCAmelCase : Dict = model(a__ , token_type_ids=a__ )
_lowerCAmelCase : Union[str, Any] = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : int = NezhaModel(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : List[Any] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , encoder_hidden_states=a__ , )
_lowerCAmelCase : List[str] = model(a__ , attention_mask=a__ , token_type_ids=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = NezhaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : int = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = NezhaForNextSentencePrediction(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = NezhaForPreTraining(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , next_sentence_label=a__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = NezhaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Tuple = NezhaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : Any = NezhaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Tuple = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_choices
_lowerCAmelCase : int = NezhaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase : List[str] = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Dict = True
def __A ( self , a__ , a__ , a__=False ):
_lowerCAmelCase : Optional[Any] = super()._prepare_for_class(a__ , a__ , return_labels=a__ )
if return_labels:
if model_class in get_values(a__ ):
_lowerCAmelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a__ )
_lowerCAmelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
return inputs_dict
def __A ( self ):
_lowerCAmelCase : Any = NezhaModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a__ )
def __A ( self ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def __A ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a__ )
def __A ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*a__ )
def __A ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
@slow
def __A ( self ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = NezhaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@slow
@require_torch_gpu
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Tuple = model_class(config=a__ )
_lowerCAmelCase : Dict = self._prepare_for_class(a__ , a__ )
_lowerCAmelCase : int = torch.jit.trace(
a__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a__ , os.path.join(a__ , """bert.pt""" ) )
_lowerCAmelCase : str = torch.jit.load(os.path.join(a__ , """bert.pt""" ) , map_location=a__ )
loaded(inputs_dict["""input_ids"""].to(a__ ) , inputs_dict["""attention_mask"""].to(a__ ) )
@require_torch
class __A ( unittest.TestCase ):
@slow
def __A ( self ):
_lowerCAmelCase : Dict = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
_lowerCAmelCase : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(a__ , attention_mask=a__ )[0]
_lowerCAmelCase : str = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : List[Any] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
_lowerCAmelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(a__ , attention_mask=a__ )[0]
_lowerCAmelCase : int = torch.Size((1, 6, 21128) )
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : Any = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) )
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
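# Maps fairseq parameter names (left) to the corresponding Hugging Face module
# paths (right); the "*" placeholder is substituted with the encoder layer index
# while the weights are copied over.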
_a : Dict = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> List[str]:
_lowerCAmelCase : Any = {}
with open(_lowerCamelCase ,"""r""" ) as file:
for line_number, line in enumerate(_lowerCamelCase ):
_lowerCAmelCase : List[str] = line.strip()
if line:
_lowerCAmelCase : Dict = line.split()
_lowerCAmelCase : List[str] = line_number
_lowerCAmelCase : Tuple = words[0]
_lowerCAmelCase : List[str] = value
return result
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : Any ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Any ) -> Dict:
for attribute in key.split(""".""" ):
_lowerCAmelCase : Dict = getattr(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
_lowerCAmelCase : str = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase : str = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase : List[Any] = getattr(_lowerCamelCase ,_lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase : List[str] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase : Any = getattr(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = shape_pointer.shape
# let's reduce dimension
_lowerCAmelCase : List[Any] = value[0]
else:
_lowerCAmelCase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
_lowerCAmelCase : int = value
elif weight_type == "weight_g":
_lowerCAmelCase : Dict = value
elif weight_type == "weight_v":
_lowerCAmelCase : int = value
elif weight_type == "bias":
_lowerCAmelCase : Union[str, Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase : str = getattr(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = value
else:
_lowerCAmelCase : str = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Dict ,_lowerCamelCase : str ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : int ) -> Optional[int]:
_lowerCAmelCase : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase : Union[str, Any] = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase : Tuple = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase : Union[str, Any] = """.""".join([key, hf_param_name] )
else:
_lowerCAmelCase : Optional[Any] = key
_lowerCAmelCase : List[Any] = value if """lm_head""" in full_key else value[0]
_a : Union[str, Any] = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Dict ,_lowerCamelCase : List[Any]=None ,_lowerCamelCase : Union[str, Any]=None ) -> Any:
_lowerCAmelCase : Any = False
for key, mapped_key in MAPPING.items():
_lowerCAmelCase : Optional[int] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase : Optional[int] = True
if "*" in mapped_key:
_lowerCAmelCase : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase : Dict = mapped_key.replace("""*""" ,_lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase : List[str] = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase : int = """weight_v"""
elif "bias" in name:
_lowerCAmelCase : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase : List[str] = """weight"""
else:
_lowerCAmelCase : Optional[Any] = None
if hf_dict is not None:
rename_dict(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
else:
set_recursively(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
return is_used
return is_used
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Optional[int] ) -> Optional[Any]:
_lowerCAmelCase : Any = []
_lowerCAmelCase : str = fairseq_model.state_dict()
_lowerCAmelCase : Optional[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,hf_model.config.feat_extract_norm == """group""" ,)
_lowerCAmelCase : Union[str, Any] = True
else:
_lowerCAmelCase : Optional[int] = load_wavaveca_layer(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f"Unused weights: {unused_weights}" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ,_lowerCamelCase : int ,_lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : Optional[int] = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase : int = name.split(""".""" )
_lowerCAmelCase : List[Any] = int(items[0] )
_lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_lowerCAmelCase : List[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_lowerCAmelCase : Tuple = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_lowerCAmelCase : str = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_lowerCAmelCase : Any = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Dict ,_lowerCamelCase : str=None ,_lowerCamelCase : Union[str, Any]=None ,_lowerCamelCase : Optional[Any]=True ,_lowerCamelCase : List[Any]=False ) -> Union[str, Any]:
if config_path is not None:
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(_lowerCamelCase )
else:
_lowerCAmelCase : List[Any] = WavaVecaConfig()
if is_seq_class:
_lowerCAmelCase : str = read_txt_into_dict(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = idalabel
_lowerCAmelCase : Optional[int] = WavaVecaForSequenceClassification(_lowerCamelCase )
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=_lowerCamelCase ,return_attention_mask=_lowerCamelCase ,)
feature_extractor.save_pretrained(_lowerCamelCase )
elif is_finetuned:
if dict_path:
_lowerCAmelCase : Tuple = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase : Optional[Any] = target_dict.pad_index
_lowerCAmelCase : str = target_dict.bos_index
_lowerCAmelCase : Dict = target_dict.eos_index
_lowerCAmelCase : List[Any] = len(target_dict.symbols )
_lowerCAmelCase : Tuple = os.path.join(_lowerCamelCase ,"""vocab.json""" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
_lowerCAmelCase : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[str] = 1
with open(_lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : List[Any] = WavaVecaCTCTokenizer(
_lowerCamelCase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=_lowerCamelCase ,)
_lowerCAmelCase : List[str] = True if config.feat_extract_norm == """layer""" else False
_lowerCAmelCase : Any = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=_lowerCamelCase ,return_attention_mask=_lowerCamelCase ,)
_lowerCAmelCase : Dict = WavaVecaProcessor(feature_extractor=_lowerCamelCase ,tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCAmelCase : int = WavaVecaForCTC(_lowerCamelCase )
else:
_lowerCAmelCase : str = WavaVecaForPreTraining(_lowerCamelCase )
if is_finetuned or is_seq_class:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase : List[Any] = argparse.Namespace(task="""audio_pretraining""" )
_lowerCAmelCase : Tuple = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=_lowerCamelCase )
_lowerCAmelCase : List[Any] = model[0].eval()
recursively_load_weights(_lowerCamelCase ,_lowerCamelCase ,not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_a : Tuple = parser.parse_args()
_a : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 663 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
        warnings.warn(
            """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use SegformerImageProcessor instead.""" , FutureWarning , )
super().__init__(*a__ , **a__ )
| 663 | 1 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( nums: list[int] ) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
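    # Illustrative sanity checks (expected values assume the usual
    # "maximum sum of non-adjacent elements" reading of this helper):
    print(SCREAMING_SNAKE_CASE([1, 2, 3]))  # 4 -> picks the non-adjacent 1 and 3
    print(SCREAMING_SNAKE_CASE([]))  # 0 -> empty input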
| 663 |
"""simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> List[Any]:
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCAmelCase : List[str] = subprocess.run(_lowerCamelCase ,shell=_lowerCamelCase ,stdout=subprocess.PIPE )
_lowerCAmelCase : int = output.stdout.decode("""utf-8""" )
_lowerCAmelCase : Tuple = json.loads(_lowerCamelCase )
_lowerCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" ,"""w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
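# Intended for CI: the script queries the GitHub Actions runners API, writes any
# offline runners to `offline_runners.txt` (so they can be reported on Slack) and
# raises a ValueError to make the workflow fail visibly when runners are down.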
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 1 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __A ( unittest.TestCase ):
def __A ( self ):
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase : str = mock.Mock()
_lowerCAmelCase : Any = 500
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : str = HTTPError
_lowerCAmelCase : str = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase : int = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=a__ ) as mock_head:
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __A ( self ):
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase : Any = mock.Mock()
_lowerCAmelCase : Any = 500
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : List[Any] = HTTPError
_lowerCAmelCase : List[str] = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase : List[str] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=a__ ) as mock_head:
_lowerCAmelCase : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def __A ( self ):
# This test is for deprecated behavior and can be removed in v5
try:
_lowerCAmelCase : Tuple = tempfile.mktemp()
with open(a__ , """wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , a__ )
_lowerCAmelCase : Optional[int] = AlbertTokenizer.from_pretrained(a__ )
finally:
os.remove(a__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" , """wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , a__ )
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def __A ( self ):
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase : Optional[int] = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class __A ( unittest.TestCase ):
_UpperCamelCase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __A ( cls ):
_lowerCAmelCase : int = TOKEN
HfFolder.save_token(a__ )
@classmethod
def __A ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def __A ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Optional[Any] = os.path.join(a__ , """vocab.txt""" )
with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_lowerCAmelCase : Any = BertTokenizer(a__ )
tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
_lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a__ , repo_id="""test-tokenizer""" , push_to_hub=a__ , use_auth_token=self._token )
_lowerCAmelCase : str = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Dict = os.path.join(a__ , """vocab.txt""" )
with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_lowerCAmelCase : Optional[int] = BertTokenizer(a__ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
_lowerCAmelCase : List[str] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
a__ , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=a__ , use_auth_token=self._token )
_lowerCAmelCase : List[str] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __A ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : str = os.path.join(a__ , """vocab.txt""" )
with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_lowerCAmelCase : Optional[int] = CustomTokenizer(a__ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : int = os.path.join(a__ , """vocab.txt""" )
with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_lowerCAmelCase : List[Any] = BertTokenizerFast.from_pretrained(a__ )
bert_tokenizer.save_pretrained(a__ )
_lowerCAmelCase : Union[str, Any] = CustomTokenizerFast.from_pretrained(a__ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
F"{USER}/test-dynamic-tokenizer" , use_fast=a__ , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
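# The tests below exercise the internal `Trie` used when splitting text on added
# tokens: `add` stores tokens as nested character dicts, `split` cuts the input on
# the longest registered match, and `cut_text` applies precomputed offsets.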
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : List[Any] = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def __A ( self ):
_lowerCAmelCase : Dict = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] )
def __A ( self ):
_lowerCAmelCase : int = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def __A ( self ):
_lowerCAmelCase : int = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def __A ( self ):
_lowerCAmelCase : List[Any] = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] )
def __A ( self ):
_lowerCAmelCase : str = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] )
def __A ( self ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
_lowerCAmelCase : Optional[int] = Trie()
_lowerCAmelCase : Any = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(a__ , ["""AB""", """C"""] )
| 663 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 1 |
"""simple docstring"""
import os
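# Column-by-column dynamic programme (minimal path sum moving right, up or down):
# each column is first filled from the left, then relaxed downwards and upwards, so
# every cell ends up with the cheapest path that may also travel vertically within a column.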
def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 663 |
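# Totient sieve: phi starts as [i - 1]; whenever phi[i] is still i - 1 the number i
# is prime, so every multiple j loses its share of residues divisible by i. The sum
# of phi over 2..limit then counts the reduced proper fractions with denominator <= limit.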
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 663 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
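# Torch-only symbols are appended to the import structure below, so importing this
# package stays lightweight unless the WavLM model classes are actually used.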
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : str = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __A ( _lowerCamelCase ):
_UpperCamelCase : Optional[int] = "unispeech-sat"
def __init__( self , a__=32 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.1 , a__=0.1 , a__=0.0_2 , a__=1e-5 , a__="group" , a__="gelu" , a__=(512, 512, 512, 512, 512, 512, 512) , a__=(5, 2, 2, 2, 2, 2, 2) , a__=(10, 3, 3, 3, 3, 2, 2) , a__=False , a__=128 , a__=16 , a__=False , a__=True , a__=0.0_5 , a__=10 , a__=2 , a__=0.0 , a__=10 , a__=0 , a__=320 , a__=2 , a__=0.1 , a__=100 , a__=256 , a__=256 , a__=0.1 , a__="mean" , a__=False , a__=False , a__=256 , a__=(512, 512, 512, 512, 1500) , a__=(5, 3, 3, 1, 1) , a__=(1, 2, 3, 1, 1) , a__=512 , a__=0 , a__=1 , a__=2 , a__=504 , **a__ , ):
super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ )
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : Tuple = feat_extract_norm
_lowerCAmelCase : int = feat_extract_activation
_lowerCAmelCase : List[Any] = list(A__ )
_lowerCAmelCase : List[str] = list(A__ )
_lowerCAmelCase : List[Any] = list(A__ )
_lowerCAmelCase : Any = conv_bias
_lowerCAmelCase : str = num_conv_pos_embeddings
_lowerCAmelCase : Any = num_conv_pos_embedding_groups
_lowerCAmelCase : int = len(self.conv_dim )
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Optional[Any] = hidden_dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : Union[str, Any] = activation_dropout
_lowerCAmelCase : str = feat_proj_dropout
_lowerCAmelCase : Any = final_dropout
_lowerCAmelCase : int = layerdrop
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : List[Any] = num_clusters
_lowerCAmelCase : str = do_stable_layer_norm
_lowerCAmelCase : int = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Any = apply_spec_augment
_lowerCAmelCase : List[Any] = mask_time_prob
_lowerCAmelCase : List[Any] = mask_time_length
_lowerCAmelCase : List[Any] = mask_time_min_masks
_lowerCAmelCase : List[Any] = mask_feature_prob
_lowerCAmelCase : Union[str, Any] = mask_feature_length
_lowerCAmelCase : Any = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : List[Any] = num_codevectors_per_group
_lowerCAmelCase : Union[str, Any] = num_codevector_groups
_lowerCAmelCase : Optional[Any] = contrastive_logits_temperature
_lowerCAmelCase : str = feat_quantizer_dropout
_lowerCAmelCase : Dict = num_negatives
_lowerCAmelCase : Optional[int] = codevector_dim
_lowerCAmelCase : Any = proj_codevector_dim
_lowerCAmelCase : Any = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Optional[int] = ctc_loss_reduction
_lowerCAmelCase : List[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : Any = list(A__ )
_lowerCAmelCase : Optional[int] = list(A__ )
_lowerCAmelCase : int = list(A__ )
_lowerCAmelCase : Union[str, Any] = xvector_output_dim
@property
def __A ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
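# Hand-worked illustration (added for clarity): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), the property above evaluates to 5 * 2**6 = 320, i.e. one
# encoder frame per 320 waveform samples (20 ms of audio at a 16 kHz sampling rate).
def _default_inputs_to_logits_ratio() -> int:
    return functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)  # == 320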
| 700 |
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    # Precompute the contrast correction factor for the requested level (-255 .. 255).
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))
    # Apply the per-pixel mapping across the whole image.
    return img.point(contrast)
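# Hand-checked sketch (added for illustration) of how the correction factor behaves at a
# few representative levels.
def _contrast_factor_examples() -> None:
    def factor(level: int) -> float:
        return (259 * (level + 255)) / (255 * (259 - level))
    assert factor(0) == 1.0  # level 0 leaves every pixel unchanged
    assert factor(128) > 1.0  # positive levels stretch values away from mid-gray 128
    assert factor(-128) < 1.0  # negative levels compress values towards mid-gray 128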
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 0 |
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_a : Optional[int] = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
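# Minimal standalone sketch (added for illustration; it assumes nothing about the real
# `require_version` implementation): how a pinned requirement such as "tqdm>=4.27" could
# be checked at runtime with importlib.metadata and packaging.
def _sketch_require_version(requirement: str) -> None:
    import re
    from importlib.metadata import version
    from packaging.version import parse
    match = re.fullmatch(r"([A-Za-z0-9_.\-]+)(==|>=|<=|>|<)(.+)", requirement)
    if match is None:
        raise ValueError(f"cannot parse requirement string: {requirement!r}")
    name, op, wanted = match.groups()
    installed, wanted = parse(version(name)), parse(wanted)
    satisfied = {
        "==": installed == wanted,
        ">=": installed >= wanted,
        "<=": installed <= wanted,
        ">": installed > wanted,
        "<": installed < wanted,
    }[op]
    if not satisfied:
        raise ImportError(f"{name} {installed} is installed but {requirement} is required")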
| 701 |
"""simple docstring"""
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    def __init__(self):
        self.queue = []
    def enqueue(self, data: int) -> None:
        if len(self.queue) >= 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        # The smallest value is treated as the highest-priority element.
        data = min(self.queue)
        self.queue.remove(data)
        return data
def __str__( self ):
return str(self.queue )
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __A :
_UpperCamelCase : List[str] = PegasusConfig
_UpperCamelCase : Dict = {}
_UpperCamelCase : Optional[Any] = """gelu"""
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=False , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__=0.1 , a__=0.1 , a__=20 , a__=2 , a__=1 , a__=0 , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : Optional[int] = seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Any = eos_token_id
_lowerCAmelCase : str = pad_token_id
_lowerCAmelCase : Union[str, Any] = bos_token_id
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_lowerCAmelCase : Tuple = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCAmelCase : Optional[int] = np.concatenate([input_ids, eos_tensor] , axis=1 )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowerCAmelCase : Tuple = prepare_pegasus_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Any = 20
_lowerCAmelCase : int = model_class_name(UpperCAmelCase_ )
_lowerCAmelCase : Optional[int] = model.encode(inputs_dict["""input_ids"""] )
_lowerCAmelCase , _lowerCAmelCase : Any = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCAmelCase : List[str] = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_lowerCAmelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase : Any = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
_lowerCAmelCase : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowerCAmelCase : Tuple = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , )
_lowerCAmelCase : str = model.decode(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = 20
_lowerCAmelCase : List[str] = model_class_name(UpperCAmelCase_ )
_lowerCAmelCase : Optional[Any] = model.encode(inputs_dict["""input_ids"""] )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowerCAmelCase : List[str] = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
_lowerCAmelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowerCAmelCase : int = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
_lowerCAmelCase : Any = model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ )
_lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class __A ( snake_case__ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_UpperCamelCase : List[str] = True
_UpperCamelCase : str = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : List[str] = False
def __A ( self ):
_lowerCAmelCase : List[str] = FlaxPegasusModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self , config_class=UpperCAmelCase_ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase : int = model_class(UpperCAmelCase_ )
@jax.jit
def encode_jitted(a__ , a__=None , **a__ ):
return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase : Optional[int] = encode_jitted(**UpperCAmelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase : List[str] = encode_jitted(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : List[str] = model_class(UpperCAmelCase_ )
_lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_lowerCAmelCase : List[str] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(a__ , a__ , a__ ):
return model.decode(
decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase : Dict = decode_jitted(**UpperCAmelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase : List[str] = decode_jitted(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __A ( self ):
for model_class_name in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCAmelCase_ )
_lowerCAmelCase : List[Any] = np.ones((1, 1) )
_lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@slow
def __A ( self ):
_lowerCAmelCase : List[str] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_lowerCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_lowerCAmelCase : int = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_lowerCAmelCase : List[str] = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_lowerCAmelCase : List[str] = tokenizer(UpperCAmelCase_ , return_tensors="""np""" , truncation=UpperCAmelCase_ , max_length=512 , padding=UpperCAmelCase_ )
_lowerCAmelCase : Union[str, Any] = model.generate(**UpperCAmelCase_ , num_beams=2 ).sequences
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
assert tgt_text == decoded
| 702 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    # Count how often each value occurs, in the same order as the input.
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
if __name__ == "__main__":
import doctest
doctest.testmod()
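# Illustrative usage sketch (added, not part of the original module): a few hand-checked
# calls to the mode helper defined above.
def _mode_examples() -> None:
    assert mode([2, 2, 3]) == [2]  # single most common value
    assert mode([1, 1, 2, 2, 3]) == [1, 2]  # ties are all returned, sorted ascending
    assert mode([]) == []  # an empty input yields an empty list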
| 703 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
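# Hand-checked sketch (added for illustration) of the rounding done above with the default
# scale_factor of 8: sizes are rounded up to the next multiple of 64 and then divided by 8.
def _downscale_examples() -> None:
    assert downscale_height_and_width(768, 768) == (96, 96)  # 768 is already a multiple of 64
    assert downscale_height_and_width(700, 700) == (88, 88)  # rounded up to 704 first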
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = "dpt"
def __init__( self , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=1e-12 , a__=384 , a__=16 , a__=3 , a__=False , a__=True , a__=[2, 5, 8, 11] , a__="project" , a__=[4, 2, 1, 0.5] , a__=[96, 192, 384, 768] , a__=256 , a__=-1 , a__=False , a__=True , a__=0.4 , a__=255 , a__=0.1 , a__=[1, 1024, 24, 24] , a__=[0, 1] , a__=None , **a__ , ):
super().__init__(**UpperCamelCase_ )
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Optional[int] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
_lowerCAmelCase : Optional[int] = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
_lowerCAmelCase : List[str] = BitConfig(**UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
_lowerCAmelCase : List[Any] = BitConfig(**UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowerCAmelCase : Any = backbone_config
else:
raise ValueError(
F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
_lowerCAmelCase : Optional[int] = backbone_featmap_shape
_lowerCAmelCase : str = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be \'project\' when using `DPT-hybrid` mode.""" )
else:
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : Union[str, Any] = qkv_bias
_lowerCAmelCase : int = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of [\'ignore\', \'add\', \'project\']""" )
_lowerCAmelCase : Any = readout_type
_lowerCAmelCase : str = reassemble_factors
_lowerCAmelCase : Union[str, Any] = neck_hidden_sizes
_lowerCAmelCase : Any = fusion_hidden_size
_lowerCAmelCase : Optional[int] = head_in_index
_lowerCAmelCase : str = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[Any] = use_auxiliary_head
_lowerCAmelCase : Any = auxiliary_loss_weight
_lowerCAmelCase : Tuple = semantic_loss_ignore_index
_lowerCAmelCase : Optional[int] = semantic_classifier_dropout
def __A ( self ):
_lowerCAmelCase : Tuple = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_lowerCAmelCase : Optional[int] = self.backbone_config.to_dict()
_lowerCAmelCase : Union[str, Any] = self.__class__.model_type
return output
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
import os
def solution() -> int:
    # Sum the name scores for every name in p022_names.txt (Project Euler 22).
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
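# Worked example (added for illustration), taken from the Project Euler 22 statement:
# "COLIN" has an alphabetical value of 3 + 15 + 12 + 9 + 14 = 53, and at position 938 in
# the sorted list it contributes 938 * 53 = 49714 to the total.
def _colin_example() -> int:
    colin_value = sum(ord(letter) - 64 for letter in "COLIN")  # 53
    return 938 * colin_value  # 49714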
if __name__ == "__main__":
print(solution())
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a : Any = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_a : Optional[int] = logging.get_logger(__name__)
class __A ( __lowerCamelCase ):
_UpperCamelCase : Optional[int] = "mask2former"
_UpperCamelCase : Optional[Any] = ["swin"]
_UpperCamelCase : Any = {"hidden_size": "hidden_dim"}
def __init__( self , a__ = None , a__ = 256 , a__ = 256 , a__ = 256 , a__ = 1024 , a__ = "relu" , a__ = 6 , a__ = 10 , a__ = 8 , a__ = 0.0 , a__ = 2048 , a__ = False , a__ = False , a__ = 4 , a__ = 255 , a__ = 100 , a__ = 0.1 , a__ = 2.0 , a__ = 5.0 , a__ = 5.0 , a__ = 12544 , a__ = 3.0 , a__ = 0.7_5 , a__ = 0.0_2 , a__ = 1.0 , a__ = True , a__ = [4, 8, 16, 32] , a__ = None , **a__ , ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
_lowerCAmelCase : Tuple = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=UpperCAmelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_lowerCAmelCase : Optional[Any] = backbone_config.pop("""model_type""" )
_lowerCAmelCase : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Dict = config_class.from_dict(UpperCAmelCase_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
F"Supported model types: {','.join(self.backbones_supported )}" )
_lowerCAmelCase : Optional[Any] = backbone_config
_lowerCAmelCase : List[Any] = feature_size
_lowerCAmelCase : Dict = mask_feature_size
_lowerCAmelCase : Optional[Any] = hidden_dim
_lowerCAmelCase : Any = encoder_feedforward_dim
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : Tuple = encoder_layers
_lowerCAmelCase : Optional[int] = decoder_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : Optional[int] = dropout
_lowerCAmelCase : Dict = dim_feedforward
_lowerCAmelCase : List[str] = pre_norm
_lowerCAmelCase : Dict = enforce_input_projection
_lowerCAmelCase : Union[str, Any] = common_stride
_lowerCAmelCase : Dict = ignore_value
_lowerCAmelCase : List[Any] = num_queries
_lowerCAmelCase : List[Any] = no_object_weight
_lowerCAmelCase : Optional[int] = class_weight
_lowerCAmelCase : Dict = mask_weight
_lowerCAmelCase : Tuple = dice_weight
_lowerCAmelCase : Any = train_num_points
_lowerCAmelCase : str = oversample_ratio
_lowerCAmelCase : Any = importance_sample_ratio
_lowerCAmelCase : Union[str, Any] = init_std
_lowerCAmelCase : str = init_xavier_std
_lowerCAmelCase : List[Any] = use_auxiliary_loss
_lowerCAmelCase : List[str] = feature_strides
_lowerCAmelCase : str = output_auxiliary_logits
_lowerCAmelCase : List[Any] = decoder_layers
super().__init__(**UpperCAmelCase_ )
@classmethod
def __A ( cls , a__ , **a__ ):
return cls(
backbone_config=UpperCAmelCase_ , **UpperCAmelCase_ , )
def __A ( self ):
_lowerCAmelCase : Tuple = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : Union[str, Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Any = self.__class__.model_type
return output
| 706 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class __A ( UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = 'bert-generation'
def __init__( self , a__=50358 , a__=1024 , a__=24 , a__=16 , a__=4096 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=0.0_2 , a__=1e-12 , a__=0 , a__=2 , a__=1 , a__="absolute" , a__=True , **a__ , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = max_position_embeddings
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : int = use_cache
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( __A ):
_UpperCamelCase = "dandelin/vilt-b32-finetuned-vqa"
_UpperCamelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
_UpperCamelCase = "image_qa"
_UpperCamelCase = AutoProcessor
_UpperCamelCase = AutoModelForVisualQuestionAnswering
_UpperCamelCase = ["image", "text"]
_UpperCamelCase = ["text"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""vision"""] )
super().__init__(*a__ , **a__ )
def __A ( self , a__ , a__ ):
return self.pre_processor(a__ , a__ , return_tensors="""pt""" )
def __A ( self , a__ ):
with torch.no_grad():
return self.model(**a__ ).logits
def __A ( self , a__ ):
_lowerCAmelCase : str = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 708 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool:
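    # A number is perfect when it equals the sum of its proper divisors, e.g. 28 = 1 + 2 + 4 + 7 + 14;
    # no proper divisor can exceed number // 2, so that is as far as the search needs to go.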
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a : int = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=400 , a__=True , a__=None , a__=True , a__=None , a__=True , a__=[0.5, 0.5, 0.5] , a__=[0.5, 0.5, 0.5] , ):
_lowerCAmelCase : List[Any] = size if size is not None else {"""shortest_edge""": 18}
_lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Union[str, Any] = batch_size
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : Any = min_resolution
_lowerCAmelCase : List[str] = max_resolution
_lowerCAmelCase : List[Any] = do_resize
_lowerCAmelCase : Tuple = size
_lowerCAmelCase : Optional[Any] = do_center_crop
_lowerCAmelCase : Dict = crop_size
_lowerCAmelCase : Any = do_normalize
_lowerCAmelCase : List[Any] = image_mean
_lowerCAmelCase : Optional[int] = image_std
def __A ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = LevitImageProcessor if is_vision_available() else None
def __A ( self ):
_lowerCAmelCase : int = LevitImageProcessingTester(self )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
def __A ( self ):
_lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : Optional[Any] = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : str = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : Tuple = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 709 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __A :
_UpperCamelCase : int
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def SCREAMING_SNAKE_CASE ( ) -> Node | None:
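    # Builds the small sample tree used by main():
    #       1
    #      / \
    #     2   3
    #    / \
    #   4   5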
    _lowerCAmelCase : Tuple = Node(1 )
    _lowerCAmelCase.left = Node(2 )
    _lowerCAmelCase.right = Node(3 )
    _lowerCAmelCase.left.left = Node(4 )
    _lowerCAmelCase.left.right = Node(5 )
    return _lowerCAmelCase
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
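    # Depth-first, root before children: root, then the left subtree, then the right subtree.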
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> int:
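    # Height counted in nodes along the longest root-to-leaf path; an empty tree has height 0.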
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
if root is None:
return output
_lowerCAmelCase : Union[str, Any] = deque([root] )
while process_queue:
_lowerCAmelCase : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
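    # Zigzag (spiral) order: collect nodes level by level, flipping the direction on every level.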
if root is None:
return []
_lowerCAmelCase : list[Sequence[Node | None]] = []
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = height(_lowerCamelCase )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Optional[int] = 0
return output
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
_lowerCAmelCase : int = make_tree()
print(f"In-order Traversal: {inorder(_lowerCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_lowerCamelCase )}" )
print(f"Post-order Traversal: {postorder(_lowerCamelCase )}" ,"""\n""" )
print(f"Height of Tree: {height(_lowerCamelCase )}" ,"""\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) ,"""\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 ,height(_lowerCamelCase ) + 1 ):
print(f"Level {level}:" ,get_nodes_from_left_to_right(_lowerCamelCase ,level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 663 | 0 |
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 710 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 663 | 0 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__=0.0_1 , a__=1000 ):
_lowerCAmelCase : Any = p_stop
_lowerCAmelCase : Tuple = max_length
def __iter__( self ):
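        # Yields 0, 1, 2, ... and stops after any item with probability p_stop (or once max_length is reached),
        # so each pass produces a stream of unpredictable length for the sharding tests.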
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = False
while not stop and count < self.max_length:
yield count
count += 1
_lowerCAmelCase : List[Any] = random.random() < self.p_stop
class __A ( unittest.TestCase ):
def __A ( self , a__ , a__ , a__=False , a__=True ):
_lowerCAmelCase : Tuple = [
BatchSamplerShard(__A , 2 , __A , split_batches=__A , even_batches=__A )
for i in range(2 )
]
_lowerCAmelCase : Dict = [list(__A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__A ) for shard in batch_sampler_shards] , [len(__A ) for e in expected] )
self.assertListEqual(__A , __A )
def __A ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
_lowerCAmelCase : int = BatchSampler(range(24 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__A , __A )
_lowerCAmelCase : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A , __A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCAmelCase : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__A , __A )
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A )
        # Check the shards when the dataset is not a round multiple of batch size but the number of batches
        # is a multiple of num_processes.
_lowerCAmelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__A , __A )
_lowerCAmelCase : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A )
        # Check the shards when the dataset is not a round multiple of batch size and the number of batches
        # is not a multiple of num_processes.
_lowerCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__A , __A )
_lowerCAmelCase : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A )
# Check the shards when the dataset is very small.
_lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__A , __A )
_lowerCAmelCase : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Dict = [[], []]
self.check_batch_sampler_shards(__A , __A )
def __A ( self ):
# Check the shards when the dataset is a round multiple of batch size.
_lowerCAmelCase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
_lowerCAmelCase : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCAmelCase : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCAmelCase : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
_lowerCAmelCase : List[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
# Check the shards when the dataset is very small.
_lowerCAmelCase : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : List[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
_lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : int = [[], []]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
def __A ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
_lowerCAmelCase : List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
_lowerCAmelCase : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCAmelCase : List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
_lowerCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
        # Check the shards when the dataset is not a round multiple of batch size but the number of batches
        # is a multiple of num_processes.
_lowerCAmelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
_lowerCAmelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
        # Check the shards when the dataset is not a round multiple of batch size and the number of batches
        # is not a multiple of num_processes.
_lowerCAmelCase : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
_lowerCAmelCase : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
# Check the shards when the dataset is very small.
_lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
_lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__A )
_lowerCAmelCase : List[str] = [[], []]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
def __A ( self ):
# Check the shards when the dataset is a round multiple of batch size.
_lowerCAmelCase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCAmelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
_lowerCAmelCase : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCAmelCase : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
_lowerCAmelCase : Optional[int] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
# Check the shards when the dataset is very small.
_lowerCAmelCase : int = BatchSampler(range(2 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
_lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
def __A ( self ):
_lowerCAmelCase : str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_lowerCAmelCase : List[str] = [BatchSamplerShard(__A , 2 , __A , even_batches=__A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __A ( self , a__ , a__ , a__ , a__=False , a__=2 , a__=False ):
random.seed(__A )
_lowerCAmelCase : str = list(__A )
_lowerCAmelCase : List[str] = [
IterableDatasetShard(
__A , batch_size=__A , drop_last=__A , num_processes=__A , process_index=__A , split_batches=__A , )
for i in range(__A )
]
_lowerCAmelCase : Dict = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__A )
iterable_dataset_lists.append(list(__A ) )
_lowerCAmelCase : Union[str, Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_lowerCAmelCase : Any = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__A ) , len(__A ) )
self.assertTrue(len(__A ) % shard_batch_size == 0 )
_lowerCAmelCase : Optional[int] = []
for idx in range(0 , len(__A ) , __A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__A ) < len(__A ):
reference += reference
self.assertListEqual(__A , reference[: len(__A )] )
def __A ( self ):
_lowerCAmelCase : int = 42
_lowerCAmelCase : Optional[int] = RandomIterableDataset()
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
# Edge case with a very small dataset
_lowerCAmelCase : Tuple = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
def __A ( self ):
_lowerCAmelCase : List[Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=__A )
_lowerCAmelCase : Tuple = SkipBatchSampler(__A , 2 )
self.assertListEqual(list(__A ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ):
_lowerCAmelCase : str = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
_lowerCAmelCase : List[str] = skip_first_batches(__A , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ):
_lowerCAmelCase : Optional[int] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __A ( self ):
Accelerator()
_lowerCAmelCase : List[Any] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 711 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
| 663 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase : int = int(__lowerCAmelCase )
assert noofclusters < len(__lowerCAmelCase )
# Find out the dimensionality
_lowerCAmelCase : int = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : List[Any] = list(range(len(__lowerCAmelCase ) ) )
shuffle(__lowerCAmelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : Any = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : Optional[int] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(__lowerCAmelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Dict = []
for centroid in centroids:
cent_assigns.append(tf.assign(__lowerCAmelCase ,__lowerCAmelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Optional[Any] = [tf.Variable(0 ) for i in range(len(__lowerCAmelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : Dict = tf.placeholder("""int32""" )
_lowerCAmelCase : List[str] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(__lowerCAmelCase ,__lowerCAmelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : List[Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : List[Any] = tf.reduce_mean(__lowerCAmelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Tuple = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Optional[int] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__lowerCAmelCase ,__lowerCAmelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Optional[Any] = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : Any = tf.argmin(__lowerCAmelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Any = tf.initialize_all_variables()
# Initialize all variables
sess.run(__lowerCAmelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : Union[str, Any] = 100
for _ in range(__lowerCAmelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(__lowerCAmelCase ) ):
_lowerCAmelCase : Dict = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : int = [
sess.run(__lowerCAmelCase ,feed_dict={va: vect, va: sess.run(__lowerCAmelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Optional[Any] = sess.run(
__lowerCAmelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(__lowerCAmelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : Any = [
vectors[i]
for i in range(len(__lowerCAmelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
__lowerCAmelCase ,feed_dict={mean_input: array(__lowerCAmelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Dict = sess.run(__lowerCAmelCase )
_lowerCAmelCase : List[str] = sess.run(__lowerCAmelCase )
return centroids, assignments
| 712 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""simple docstring"""
import numpy as np
import datasets
_a : Dict = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_a : List[Any] = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_a : Optional[Any] = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = np.array(a__ )
_lowerCAmelCase : List[Any] = np.array(a__ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
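        # Squared Mahalanobis distance per row: (x - mu) @ Sigma^-1 @ (x - mu)^T, where Sigma is the covariance
        # of the reference distribution (the pseudo-inverse is used when Sigma is singular).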
_lowerCAmelCase : Tuple = X - np.mean(a__ )
_lowerCAmelCase : Tuple = np.cov(reference_distribution.T )
try:
_lowerCAmelCase : int = np.linalg.inv(a__ )
except np.linalg.LinAlgError:
_lowerCAmelCase : Any = np.linalg.pinv(a__ )
_lowerCAmelCase : List[str] = np.dot(a__ , a__ )
_lowerCAmelCase : Optional[Any] = np.dot(a__ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Optional[int] = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
_a : Optional[int] = "Muhammad Umer Farooq"
_a : Optional[Any] = "MIT"
_a : Any = "1.0.0"
_a : Tuple = "Muhammad Umer Farooq"
_a : Union[str, Any] = "[email protected]"
_a : Union[str, Any] = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __A ( __lowerCAmelCase ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : list[str] = []
_lowerCAmelCase : Any = domain
def __A ( self , a__ , a__ ):
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor just "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_lowerCAmelCase : List[Any] = parse.urljoin(self.domain , _UpperCamelCase )
self.urls.append(_UpperCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Optional[int]:
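    # Keep only the last two labels of the host name, e.g. "www.github.com" -> "github.com".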
return ".".join(get_sub_domain_name(__A ).split(""".""" )[-2:] )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Any:
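    # Extract the network location (host name) from the URL.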
return parse.urlparse(__A ).netloc
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = "https://github.com" ) -> str:
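    # Crawl the given page, follow every anchor found on it, and collect e-mail addresses for the domain.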
_lowerCAmelCase : int = get_domain_name(__A )
# Initialize the parser
_lowerCAmelCase : Union[str, Any] = Parser(__A )
try:
# Open URL
_lowerCAmelCase : Optional[int] = requests.get(__A )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_lowerCAmelCase : Union[str, Any] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_lowerCAmelCase : Any = requests.get(__A )
# Get the valid email.
_lowerCAmelCase : Optional[int] = re.findall("""[a-zA-Z0-9]+@""" + domain ,read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__A )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__A )
if __name__ == "__main__":
_a : Dict = emails_from_url('https://github.com')
print(F"""{len(emails)} emails found:""")
print('\n'.join(sorted(emails)))
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_a : List[Any] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
_a : int = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
_a : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
_a : Optional[Any] = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
_a : Dict = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
_a : Dict = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
_a : str = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
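    # Pick two random hands from the pre-sorted list; the expected result follows from their index order.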
_lowerCAmelCase : Optional[Any] = randrange(len(_lowerCamelCase ) ), randrange(len(_lowerCamelCase ) )
_lowerCAmelCase : List[Any] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
_lowerCAmelCase : int = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] = 100 ) -> List[Any]:
return (generate_random_hand() for _ in range(_lowerCamelCase ))
@pytest.mark.parametrize("""hand, expected""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : List[Any] ) -> str:
assert PokerHand(_lowerCamelCase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> Tuple:
assert PokerHand(_lowerCamelCase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ) -> List[Any]:
_lowerCAmelCase : int = PokerHand(_lowerCamelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> List[str]:
assert PokerHand(_lowerCamelCase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> Any:
assert PokerHand(_lowerCamelCase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ) -> Any:
assert PokerHand(_lowerCamelCase ).compare_with(PokerHand(_lowerCamelCase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" ,generate_random_hands() )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : str ,_lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
assert PokerHand(_lowerCamelCase ).compare_with(PokerHand(_lowerCamelCase ) ) == expected
def SCREAMING_SNAKE_CASE ( ) -> Dict:
_lowerCAmelCase : Optional[Any] = [PokerHand(_lowerCamelCase ) for hand in SORTED_HANDS]
_lowerCAmelCase : List[str] = poker_hands.copy()
shuffle(_lowerCamelCase )
_lowerCAmelCase : int = chain(sorted(_lowerCamelCase ) )
for index, hand in enumerate(_lowerCamelCase ):
assert hand == poker_hands[index]
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Tuple = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=_lowerCamelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : Union[str, Any] = PokerHand("""2C 4S AS 3D 5C""" )
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Optional[int] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def SCREAMING_SNAKE_CASE ( ) -> Dict:
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Optional[Any] = os.path.abspath(os.path.dirname(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = os.path.join(_lowerCamelCase ,"""poker_hands.txt""" )
with open(_lowerCamelCase ) as file_hand:
for line in file_hand:
_lowerCAmelCase : Dict = line[:14].strip()
_lowerCAmelCase : List[str] = line[15:].strip()
_lowerCAmelCase : List[str] = PokerHand(_lowerCamelCase ), PokerHand(_lowerCamelCase )
_lowerCAmelCase : Dict = player.compare_with(_lowerCamelCase )
if output == "Win":
answer += 1
assert answer == 376
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : str = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 663 | 0 |
"""simple docstring"""
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any]=None ) -> Any:
if subparsers is not None:
_lowerCAmelCase : Optional[Any] = subparsers.add_parser("""test""" )
else:
_lowerCAmelCase : List[Any] = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" ,default=a_ ,help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """
"""such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """
"""with \'huggingface\'."""
) ,)
if subparsers is not None:
parser.set_defaults(func=a_ )
return parser
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Optional[Any]:
_lowerCAmelCase : List[str] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
_lowerCAmelCase : List[Any] = script_name
else:
_lowerCAmelCase : Dict = f"--config_file={args.config_file} {script_name}"
_lowerCAmelCase : Optional[Any] = ['''accelerate-launch'''] + test_args.split()
_lowerCAmelCase : List[Any] = execute_subprocess_async(a_ ,env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def SCREAMING_SNAKE_CASE ( ) -> Dict:
_lowerCAmelCase : Optional[Any] = test_command_parser()
_lowerCAmelCase : Dict = parser.parse_args()
test_command(a_ )
if __name__ == "__main__":
main()
| 718 |
"""simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> List[Any]:
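    # Query the GitHub Actions API for the repository's self-hosted runners and collect the offline ones.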
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCAmelCase : List[str] = subprocess.run(_lowerCamelCase ,shell=_lowerCamelCase ,stdout=subprocess.PIPE )
_lowerCAmelCase : int = output.stdout.decode("""utf-8""" )
_lowerCAmelCase : Tuple = json.loads(_lowerCamelCase )
_lowerCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
    # save the result so we can report it on Slack
with open("""offline_runners.txt""" ,"""w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a : Tuple = logging.get_logger(__name__)
class __A ( _UpperCAmelCase ):
_UpperCamelCase : Union[str, Any] = ['pixel_values']
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BICUBIC , a__ = True , a__ = True , a__ = 1 / 255 , a__ = None , a__ = True , a__ = None , a__ = None , **a__ , ):
super().__init__(**__UpperCamelCase )
_lowerCAmelCase : Optional[Any] = size if size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : str = get_size_dict(__UpperCamelCase )
_lowerCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : List[str] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name="""crop_size""" )
_lowerCAmelCase : Optional[int] = do_resize
_lowerCAmelCase : Optional[int] = do_rescale
_lowerCAmelCase : int = do_normalize
_lowerCAmelCase : Dict = do_center_crop
_lowerCAmelCase : Optional[Any] = crop_size
_lowerCAmelCase : Optional[int] = size
_lowerCAmelCase : List[str] = resample
_lowerCAmelCase : Any = rescale_factor
_lowerCAmelCase : List[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __A ( self , a__ , a__ , a__ = PILImageResampling.BILINEAR , a__ = None , **a__ , ):
_lowerCAmelCase : Union[str, Any] = get_size_dict(__UpperCamelCase )
if "shortest_edge" in size:
_lowerCAmelCase : List[str] = get_resize_output_image_size(__UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=__UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_lowerCAmelCase : Dict = (size["""height"""], size["""width"""])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
_lowerCAmelCase : Dict = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def __A ( self , a__ , a__ , a__ = None , **a__ ):
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __A ( self , a__ , a__ , a__ , a__ = None , **a__ , ):
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : str = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Any = get_size_dict(__UpperCamelCase , param_name="""crop_size""" , default_to_square=__UpperCamelCase )
_lowerCAmelCase : Optional[Any] = resample if resample is not None else self.resample
_lowerCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Union[str, Any] = size if size is not None else self.size
_lowerCAmelCase : Union[str, Any] = get_size_dict(__UpperCamelCase )
if not is_batched(__UpperCamelCase ):
_lowerCAmelCase : Optional[Any] = [images]
if not valid_images(__UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : int = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_lowerCAmelCase : Any = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_lowerCAmelCase : Optional[Any] = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_lowerCAmelCase : List[Any] = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_lowerCAmelCase : Union[str, Any] = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_lowerCAmelCase : str = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_lowerCAmelCase : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 719 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> list:
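    # Insertion sort variant: find each element's position with a binary search, then shift and insert.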
_lowerCAmelCase : Optional[Any] = len(_lowerCamelCase )
for i in range(1 ,_lowerCamelCase ):
_lowerCAmelCase : List[str] = collection[i]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = i - 1
while low <= high:
_lowerCAmelCase : Dict = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Union[str, Any] = mid - 1
else:
_lowerCAmelCase : str = mid + 1
for j in range(_lowerCamelCase ,_lowerCamelCase ,-1 ):
_lowerCAmelCase : str = collection[j - 1]
_lowerCAmelCase : str = val
return collection
if __name__ == "__main__":
_a : str = input('Enter numbers separated by a comma:\n').strip()
_a : Dict = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 720 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000000 ) -> int:
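    # Sieve-based Euler totient: whenever phi[i] == i - 1, i is prime, so reduce phi for every multiple of i.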
_lowerCAmelCase : List[str] = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,_lowerCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 663 | 0 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
debug_launcher(test_script.main )
def __A ( self ):
debug_launcher(test_ops.main )
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 700 |
"""simple docstring"""
from PIL import Image
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ,_lowerCamelCase : int ) -> Image:
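    # Contrast correction factor: 1.0 at level 0, > 1 for positive levels (more contrast), < 1 for negative.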
_lowerCAmelCase : Any = (259 * (level + 255)) / (255 * (259 - level))
def contrast(_lowerCamelCase : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(_lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
_a : str = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Optional[int] = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
"""simple docstring"""
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A :
def __init__( self ):
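        # Three FIFO buckets; index 0 is the highest priority.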
_lowerCAmelCase : Union[str, Any] = [
[],
[],
[],
]
def __A ( self , a__ , a__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(a__ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __A ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class __A :
def __init__( self ):
_lowerCAmelCase : int = []
def __A ( self , a__ ):
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(a__ )
def __A ( self ):
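        # Remove and return the smallest element (the queue behaves as a min-priority queue).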
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
_lowerCAmelCase : int = min(self.queue )
self.queue.remove(a__ )
return data
def __str__( self ):
return str(self.queue )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : Union[str, Any] = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Optional[Any] = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
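    # Map the requested image size onto the latent grid: ceil(dim / scale_factor**2) * scale_factor.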
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
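    # Resize the PIL image, scale pixel values to [-1, 1], and return an NCHW torch tensor.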
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
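        # Encode the init image into movq latents and add noise for the chosen starting timestep.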
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_a : Optional[int] = logging.get_logger(__name__)
class __A ( lowercase__ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
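        # 20 rendered views at the 32x32 frame_size requested in get_dummy_inputs; the view count of 20
        # presumably comes from the renderer's defaults rather than anything set explicitly in this test.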
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
END = '#'
class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}
    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True
    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)
    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    print(autocomplete_using_trie('de'))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
        vocab_tokens = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
        tokenizer = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
        texts = """今天天气真好!"""
        jieba_tokens = ["""今天""", """天气""", """真""", """好""", """!"""]
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = """今天天气真好!"""
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_ids )
        reconstructed_text = tokenizer.decode(input_ids )
        self.assertEqual(reconstructed_text , normalized_text )
| 663 | 0 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
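# Example (hypothetical sizes): a 512x256 PIL image passes through unchanged (both sides are already
# multiples of 32) and comes out as a (1, 3, 256, 512) float tensor scaled to [-1, 1].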
class __A ( __a ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self , a__ = None , a__ = 1 , a__ = 100 , a__ = 0.0 , a__ = None , a__ = "pil" , a__ = True , ):
if isinstance(a_ , PIL.Image.Image ):
_lowerCAmelCase : List[Any] = 1
elif isinstance(a_ , torch.Tensor ):
_lowerCAmelCase : int = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(a_ )}" )
if isinstance(a_ , PIL.Image.Image ):
_lowerCAmelCase : Optional[int] = preprocess(a_ )
_lowerCAmelCase : Any = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_lowerCAmelCase : str = (batch_size, self.unet.config.in_channels // 2, height, width)
_lowerCAmelCase : Any = next(self.unet.parameters() ).dtype
_lowerCAmelCase : int = randn_tensor(a_ , generator=a_ , device=self.device , dtype=a_ )
_lowerCAmelCase : List[str] = image.to(device=self.device , dtype=a_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(a_ , device=self.device )
_lowerCAmelCase : Optional[int] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase : Optional[int] = {}
if accepts_eta:
_lowerCAmelCase : Optional[Any] = eta
for t in self.progress_bar(a_ ):
# concat latents and low resolution image in the channel dimension.
_lowerCAmelCase : List[str] = torch.cat([latents, image] , dim=1 )
_lowerCAmelCase : int = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
_lowerCAmelCase : Any = self.unet(a_ , a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Dict = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# decode the image latents with the VQVAE
_lowerCAmelCase : Tuple = self.vqvae.decode(a_ ).sample
_lowerCAmelCase : Dict = torch.clamp(a_ , -1.0 , 1.0 )
_lowerCAmelCase : Optional[Any] = image / 2 + 0.5
_lowerCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 706 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
_a : Dict = [
'good first issue',
'feature request',
'wip',
]
def main() -> None:
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/accelerate""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
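    # _LazyModule defers the imports declared in _import_structure until an attribute is first accessed,
    # which keeps importing the package cheap (the standard pattern for these __init__ files).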
| 663 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification(TaskTemplate ):
    task: str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"image": Image()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(F"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping( self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
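    # Example (sketch): aligning with Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    # copies that ClassLabel into label_schema, so downstream code can map class indices back to names.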
| 708 |
"""simple docstring"""
def perfect(number: int ) -> bool:
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
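# A perfect number equals the sum of its proper divisors: 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14,
# so perfect(6) and perfect(28) return True while perfect(27) returns False.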
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a : int = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
_a : List[Any] = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
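# floats_list produces a shape[0] x shape[1] nested list of pseudo-random floats scaled by `scale`,
# which the tests below treat as stand-ins for raw waveforms or spectrogram frames.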
@require_torch
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=400 , a__=2000 , a__=1 , a__=0.0 , a__=16000 , a__=True , a__=80 , a__=16 , a__=64 , a__="hann_window" , a__=80 , a__=7600 , a__=1e-10 , a__=True , ):
_lowerCAmelCase : int = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : Dict = min_seq_length
_lowerCAmelCase : Any = max_seq_length
_lowerCAmelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCAmelCase : Union[str, Any] = feature_size
_lowerCAmelCase : Tuple = padding_value
_lowerCAmelCase : str = sampling_rate
_lowerCAmelCase : Dict = do_normalize
_lowerCAmelCase : str = num_mel_bins
_lowerCAmelCase : List[str] = hop_length
_lowerCAmelCase : str = win_length
_lowerCAmelCase : Optional[Any] = win_function
_lowerCAmelCase : List[str] = fmin
_lowerCAmelCase : Any = fmax
_lowerCAmelCase : Optional[int] = mel_floor
_lowerCAmelCase : Tuple = return_attention_mask
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __A ( self , a__=False , a__=False ):
def _flatten(a__ ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
_lowerCAmelCase : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCAmelCase : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase : Any = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
def __A ( self , a__=False , a__=False ):
if equal_length:
_lowerCAmelCase : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCAmelCase : Any = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase : Optional[int] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __A ( a__ , unittest.TestCase ):
_UpperCamelCase : Tuple = SpeechTaFeatureExtractor
def __A ( self ):
_lowerCAmelCase : Tuple = SpeechTaFeatureExtractionTester(self )
def __A ( self , a__ ):
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1e-3 ) )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Any = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_lowerCAmelCase : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
_lowerCAmelCase : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
# Test batched
_lowerCAmelCase : int = feat_extract(lowerCAmelCase__ , return_tensors="""np""" ).input_values
_lowerCAmelCase : int = feat_extract(lowerCAmelCase__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : int = ["longest", "max_length", "do_not_pad"]
_lowerCAmelCase : Tuple = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_lowerCAmelCase : Dict = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="""np""" )
_lowerCAmelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __A ( self ):
_lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : List[str] = range(800 , 1400 , 200 )
_lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in lengths]
_lowerCAmelCase : Any = ["longest", "max_length", "do_not_pad"]
_lowerCAmelCase : Any = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_lowerCAmelCase : List[Any] = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
_lowerCAmelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __A ( self ):
_lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Union[str, Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
_lowerCAmelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __A ( self ):
_lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : List[Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_lowerCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : int = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Any = np.random.rand(100 ).astype(np.floataa )
_lowerCAmelCase : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCAmelCase : str = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_lowerCAmelCase : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Tuple = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCAmelCase : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
_lowerCAmelCase : Dict = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
_lowerCAmelCase : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
# Test batched
_lowerCAmelCase : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="""np""" ).input_values
_lowerCAmelCase : Any = feature_extractor(lowerCAmelCase__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowerCAmelCase : List[Any] = np.asarray(lowerCAmelCase__ )
_lowerCAmelCase : str = feature_extractor(lowerCAmelCase__ , return_tensors="""np""" ).input_values
_lowerCAmelCase : str = feature_extractor(lowerCAmelCase__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Union[str, Any] = feat_extract.model_input_names[0]
_lowerCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) )
_lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
_lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_lowerCAmelCase : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __A ( self ):
_lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Optional[int] = feat_extract.model_input_names[0]
_lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_lowerCAmelCase : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __A ( self ):
_lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : Optional[Any] = feat_extract.model_input_names[0]
_lowerCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : Tuple = feat_extract.num_mel_bins # hack!
_lowerCAmelCase : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase : Any = feat_extract.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __A ( self ):
_lowerCAmelCase : Any = self.feat_extract_dict
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**lowerCAmelCase__ )
_lowerCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
_lowerCAmelCase : int = feat_extract.model_input_names[0]
_lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : Union[str, Any] = feat_extract.num_mel_bins # hack!
_lowerCAmelCase : Dict = feat_extract.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , lowerCAmelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.feat_extract_dict
_lowerCAmelCase : str = True
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**lowerCAmelCase__ )
_lowerCAmelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
_lowerCAmelCase : Optional[Any] = feat_extract.model_input_names[0]
_lowerCAmelCase : str = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : Optional[Any] = min(lowerCAmelCase__ )
_lowerCAmelCase : List[Any] = feat_extract.num_mel_bins # hack!
_lowerCAmelCase : Any = feat_extract.pad(
lowerCAmelCase__ , padding="""max_length""" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="""np""" )
self.assertIn("""attention_mask""" , lowerCAmelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __A ( self , a__ ):
from datasets import load_dataset
_lowerCAmelCase : Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_lowerCAmelCase : Optional[Any] = ds.sort("""id""" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[Any] = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
_lowerCAmelCase : List[str] = self._load_datasamples(1 )
_lowerCAmelCase : Union[str, Any] = SpeechTaFeatureExtractor()
_lowerCAmelCase : str = feature_extractor(lowerCAmelCase__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1e-6 ) )
def __A ( self ):
# fmt: off
_lowerCAmelCase : Tuple = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
_lowerCAmelCase : Dict = self._load_datasamples(1 )
_lowerCAmelCase : Tuple = SpeechTaFeatureExtractor()
_lowerCAmelCase : Optional[int] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1e-4 ) )
| 709 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
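# For the tree built by make_tree() (1 at the root, 2 and 3 as its children, 4 and 5 under 2) these give
# preorder [1, 2, 4, 5, 3], inorder [4, 2, 5, 1, 3] and postorder [4, 5, 2, 3, 1].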
def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)
    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)
    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 663 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __A ( UpperCAmelCase__ ):
_UpperCamelCase : Optional[int] = 42
_UpperCamelCase : str = 42
class __A ( UpperCAmelCase__ , UpperCAmelCase__ ):
_UpperCamelCase : str = 1
@register_to_config
def __init__( self , a__ = 2000 , a__ = 0.1_5 , a__ = 0.0_1 , a__ = 1348.0 , a__ = 1e-5 , a__ = 1 , ):
_lowerCAmelCase : List[Any] = sigma_max
# setable values
_lowerCAmelCase : List[Any] = None
self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __A ( self , a__ , a__ = None ):
return sample
def __A ( self , a__ , a__ = None , a__ = None ):
_lowerCAmelCase : List[str] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCAmelCase : Optional[int] = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None ):
_lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCAmelCase : List[str] = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCAmelCase : Union[str, Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase : List[str] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCAmelCase : Tuple = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
_lowerCAmelCase : Union[str, Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def __A ( self , a__ , a__ ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def __A ( self , a__ , a__ , a__ , a__ = None , a__ = True , ):
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
_lowerCAmelCase : Optional[Any] = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCAmelCase : List[str] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCAmelCase : Optional[int] = timesteps.to(self.discrete_sigmas.device )
_lowerCAmelCase : Dict = self.discrete_sigmas[timesteps].to(sample.device )
_lowerCAmelCase : str = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
_lowerCAmelCase : Optional[int] = torch.zeros_like(__lowerCAmelCase )
_lowerCAmelCase : Any = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCAmelCase : Any = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_lowerCAmelCase : Union[str, Any] = diffusion.unsqueeze(-1 )
_lowerCAmelCase : int = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCAmelCase : Optional[int] = randn_tensor(
sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
_lowerCAmelCase : Tuple = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCAmelCase : List[str] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )
def __A ( self , a__ , a__ , a__ = None , a__ = True , ):
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCAmelCase : Optional[int] = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_lowerCAmelCase : Dict = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_lowerCAmelCase : List[str] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_lowerCAmelCase : Any = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCAmelCase : Optional[Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCAmelCase : Union[str, Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_lowerCAmelCase : Optional[Any] = step_size.unsqueeze(-1 )
_lowerCAmelCase : Any = sample + step_size * model_output
_lowerCAmelCase : Dict = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCAmelCase )
def __A ( self , a__ , a__ , a__ , ):
_lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device )
_lowerCAmelCase : int = self.discrete_sigmas.to(original_samples.device )[timesteps]
_lowerCAmelCase : List[Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
)
_lowerCAmelCase : str = noise + original_samples
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 710 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
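        # AcceleratorState is shared process-wide, so resetting it here keeps this test from leaking
        # prepared state into whatever test runs next (a pattern used elsewhere in accelerate's tests).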
| 663 | 0 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __A ( __UpperCAmelCase ):
_UpperCamelCase : int = ["""vqvae"""]
def __init__( self , a__ , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , mel=UpperCAmelCase_ , vqvae=UpperCAmelCase_ )
def __A ( self ):
return 50 if isinstance(self.scheduler , UpperCAmelCase_ ) else 1000
@torch.no_grad()
def __call__( self , a__ = 1 , a__ = None , a__ = None , a__ = 0 , a__ = 0 , a__ = None , a__ = None , a__ = 0 , a__ = 0 , a__ = None , a__ = 0 , a__ = None , a__ = None , a__=True , ):
_lowerCAmelCase : Tuple = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_ )
_lowerCAmelCase : str = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : List[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
_lowerCAmelCase : Optional[Any] = noise
_lowerCAmelCase : List[str] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase : Optional[Any] = self.mel.audio_slice_to_image(UpperCAmelCase_ )
_lowerCAmelCase : Optional[int] = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : List[Any] = (input_image / 255) * 2 - 1
_lowerCAmelCase : Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0 ) ).latent_dist.sample(
generator=UpperCAmelCase_ )[0]
_lowerCAmelCase : Any = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : Tuple = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Any = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : int = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : Union[str, Any] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UpperCAmelCase_ ):
_lowerCAmelCase : Optional[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )["""sample"""]
else:
_lowerCAmelCase : List[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_ )["""sample"""]
if isinstance(self.scheduler , UpperCAmelCase_ ):
_lowerCAmelCase : int = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )["""prev_sample"""]
else:
_lowerCAmelCase : Tuple = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Optional[int] = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[int] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : List[str] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Optional[Any] = self.vqvae.decode(UpperCAmelCase_ )["""sample"""]
_lowerCAmelCase : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase : Optional[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCAmelCase : List[Any] = (images * 255).round().astype("""uint8""" )
_lowerCAmelCase : Union[str, Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )
_lowerCAmelCase : List[str] = [self.mel.image_to_audio(UpperCAmelCase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCAmelCase_ ) )
@torch.no_grad()
def __A ( self , a__ , a__ = 50 ):
assert isinstance(self.scheduler , UpperCAmelCase_ )
self.scheduler.set_timesteps(UpperCAmelCase_ )
_lowerCAmelCase : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Optional[int] = (sample / 255) * 2 - 1
_lowerCAmelCase : Tuple = torch.Tensor(UpperCAmelCase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCAmelCase : Any = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : List[Any] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : List[Any] = 1 - alpha_prod_t
_lowerCAmelCase : Optional[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_ )["""sample"""]
_lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __A ( a__ , a__ , a__ ):
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(UpperCAmelCase_ ) , torch.flatten(UpperCAmelCase_ ) ) / torch.norm(UpperCAmelCase_ ) / torch.norm(UpperCAmelCase_ ) )
return sin((1 - alpha) * theta ) * xa / sin(UpperCAmelCase_ ) + sin(alpha * theta ) * xa / sin(UpperCAmelCase_ )
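# The static method above is spherical linear interpolation (slerp) between two tensors.
# A minimal, self-contained sketch of the same formula, with illustrative (assumed) names
# in place of the obfuscated ones; it is not part of the pipeline class itself.
from math import acos, sin
import torch
def slerp_sketch(x0, x1, alpha):
    # angle between the two flattened tensors
    theta = acos(float(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1))))
    # interpolate along the great circle from x0 (alpha = 0) to x1 (alpha = 1)
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)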
| 711 |
"""simple docstring"""
from random import shuffle
# NOTE: this script targets the TensorFlow 1.x graph-mode API (tf.Session, tf.placeholder).
# On TensorFlow 2.x it needs `import tensorflow.compat.v1 as tf` followed by
# `tf.disable_v2_behavior()`.
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
        _lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )  # tf.sub was renamed tf.subtract in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        _lowerCAmelCase : Optional[Any] = tf.global_variables_initializer()  # tf.initialize_all_variables() is deprecated
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
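# For comparison, a plain-NumPy sketch of the same Expectation-Maximization loop, written
# without TensorFlow; the names, the fixed iteration count and the seed are illustrative
# assumptions rather than part of the script above.
import numpy as np
def kmeans_reference(vectors, k, iterations=100):
    rng = np.random.default_rng(0)
    # start from k distinct vectors picked at random, as the TF version does
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)].astype(float)
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # E-step: assign every vector to its nearest centroid (Euclidean distance)
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # M-step: move each centroid to the mean of the vectors assigned to it
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments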
| 663 | 0 |
"""simple docstring"""
import d4rl # noqa  # importing d4rl registers the locomotion environments (e.g. hopper-medium-v2) with gym
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
_a : Union[str, Any] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
_a : List[Any] = 'hopper-medium-v2'
_a : Optional[int] = gym.make(env_name)
_a : int = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
_a : str = env.reset()
_a : Optional[Any] = 0
_a : Optional[int] = 0
_a : Optional[Any] = 1_000
_a : Dict = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_a : Optional[int] = pipeline(obs, planning_horizon=32)
# execute action in environment
_a : Union[str, Any] = env.step(denorm_actions)
_a : Dict = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
_a : Optional[Any] = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 712 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> int:
if not grid or not grid[0]:
raise TypeError("""The grid does not contain the appropriate information""" )
for cell_n in range(1 ,len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_lowerCAmelCase : Dict = grid[0]
for row_n in range(1 ,len(__UpperCamelCase ) ):
_lowerCAmelCase : Any = grid[row_n]
_lowerCAmelCase : Any = fill_row(__UpperCamelCase ,__UpperCamelCase )
_lowerCAmelCase : Any = grid[row_n]
return grid[-1][-1]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Any ) -> list:
current_row[0] += row_above[0]
for cell_n in range(1 ,len(__UpperCamelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
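# The two functions above implement the classic minimum-path-sum dynamic program: each
# cell accumulates its own cost plus the cheaper of the cell to its left and the cell
# above. A self-contained sketch of that reading (the name and the test grid are
# illustrative):
def min_path_sum_sketch(grid):
    rows = [row[:] for row in grid]  # work on a copy so the caller's grid is untouched
    for c in range(1, len(rows[0])):
        rows[0][c] += rows[0][c - 1]  # first row: only reachable from the left
    for r in range(1, len(rows)):
        rows[r][0] += rows[r - 1][0]  # first column: only reachable from above
        for c in range(1, len(rows[r])):
            rows[r][c] += min(rows[r][c - 1], rows[r - 1][c])
    return rows[-1][-1]
assert min_path_sum_sketch([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1 -> 3 -> 1 -> 1 -> 1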
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_a : List[Any] = 16
_a : Union[str, Any] = 32
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[Any] = 16 ,_lowerCamelCase : Optional[Any] = "bert-base-cased" ) -> List[str]:
_lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(_lowerCamelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : List[str] = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_lowerCamelCase ,max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCAmelCase : List[str] = datasets.map(
_lowerCamelCase ,batched=_lowerCamelCase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,load_from_cache_file=_lowerCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : Optional[Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(_lowerCamelCase : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCamelCase ,padding="""max_length""" ,max_length=128 ,return_tensors="""pt""" )
return tokenizer.pad(_lowerCamelCase ,padding="""longest""" ,return_tensors="""pt""" )
# Instantiate dataloaders.
_lowerCAmelCase : Any = DataLoader(
tokenized_datasets["""train"""] ,shuffle=_lowerCamelCase ,collate_fn=_lowerCamelCase ,batch_size=_lowerCamelCase )
_lowerCAmelCase : int = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=_lowerCamelCase ,collate_fn=_lowerCamelCase ,batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : str ) -> List[Any]:
# Initialize accelerator
_lowerCAmelCase : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : Any = config["""lr"""]
_lowerCAmelCase : Dict = int(config["""num_epochs"""] )
_lowerCAmelCase : List[Any] = int(config["""seed"""] )
_lowerCAmelCase : List[str] = int(config["""batch_size"""] )
_lowerCAmelCase : List[Any] = args.model_name_or_path
set_seed(_lowerCamelCase )
_lowerCAmelCase : List[str] = get_dataloaders(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained(_lowerCamelCase ,return_dict=_lowerCamelCase )
# Instantiate optimizer
_lowerCAmelCase : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCAmelCase : List[str] = optimizer_cls(params=model.parameters() ,lr=_lowerCamelCase )
if accelerator.state.deepspeed_plugin is not None:
_lowerCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_lowerCAmelCase : Any = 1
_lowerCAmelCase : str = (len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCAmelCase : List[Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase ,num_warmup_steps=0 ,num_training_steps=_lowerCamelCase ,)
else:
_lowerCAmelCase : Dict = DummyScheduler(_lowerCamelCase ,total_num_steps=_lowerCamelCase ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase : Dict = accelerator.prepare(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase : str = 0
# We also need to keep track of the stating epoch so files are named properly
_lowerCAmelCase : Tuple = 0
# Now we train the model
_lowerCAmelCase : str = evaluate.load("""glue""" ,"""mrpc""" )
_lowerCAmelCase : int = 0
_lowerCAmelCase : Tuple = {}
for epoch in range(_lowerCamelCase ,_lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
_lowerCAmelCase : List[str] = model(**_lowerCamelCase )
_lowerCAmelCase : int = outputs.loss
_lowerCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_lowerCAmelCase : List[Any] = 0
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**_lowerCamelCase )
_lowerCAmelCase : Dict = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowerCAmelCase : str = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowerCamelCase ) - 1:
_lowerCAmelCase : int = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCAmelCase : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowerCamelCase ,references=_lowerCamelCase ,)
_lowerCAmelCase : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" ,_lowerCamelCase )
_lowerCAmelCase : str = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
_lowerCAmelCase : Dict = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,"""all_results.json""" ) ,"""w""" ) as f:
json.dump(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,default="""bert-base-cased""" ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--output_dir""" ,type=_lowerCamelCase ,default=""".""" ,help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,)
parser.add_argument(
"""--performance_lower_bound""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" ,)
parser.add_argument(
"""--num_epochs""" ,type=_lowerCamelCase ,default=3 ,help="""Number of train epochs.""" ,)
_lowerCAmelCase : Optional[int] = parser.parse_args()
_lowerCAmelCase : Any = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_a : Optional[Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __A :
def __init__( self , a__ , a__=16 , a__=13 , a__=7 , a__=14 , a__=10 , a__=19 , a__=5 , a__=4 , a__=True , a__=16 , a__=2 , a__=4 , a__=4 , a__="gelu" , a__=0.1 , a__=0.1 , a__=[1, 2, 3, 4, 5] , a__=25 , a__=5 , ):
_lowerCAmelCase : List[Any] = d_model
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Tuple = prediction_length
_lowerCAmelCase : Tuple = context_length
_lowerCAmelCase : List[Any] = cardinality
_lowerCAmelCase : Dict = num_time_features
_lowerCAmelCase : Optional[int] = lags_sequence
_lowerCAmelCase : Union[str, Any] = embedding_dimension
_lowerCAmelCase : int = is_training
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = context_length
_lowerCAmelCase : List[Any] = prediction_length + label_length
_lowerCAmelCase : Tuple = label_length
_lowerCAmelCase : int = moving_average
_lowerCAmelCase : Optional[int] = autocorrelation_factor
def __A ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = config.context_length + max(config.lags_sequence )
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_lowerCAmelCase : str = floats_tensor([self.batch_size, _past_length] )
_lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, config.prediction_length] )
_lowerCAmelCase : Any = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def __A ( self ):
_lowerCAmelCase : List[str] = self.get_config()
_lowerCAmelCase : List[str] = self.prepare_autoformer_inputs_dict(__lowercase )
return config, inputs_dict
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = AutoformerModel(config=__lowercase ).to(__lowercase ).eval()
_lowerCAmelCase : Any = model(**__lowercase )
_lowerCAmelCase : Union[str, Any] = outputs.encoder_last_hidden_state
_lowerCAmelCase : Optional[int] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Dict = model.get_encoder()
encoder.save_pretrained(__lowercase )
_lowerCAmelCase : int = AutoformerEncoder.from_pretrained(__lowercase ).to(__lowercase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = model.create_network_inputs(**__lowercase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_lowerCAmelCase : Union[str, Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_lowerCAmelCase : List[Any] = encoder(inputs_embeds=__lowercase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_lowerCAmelCase : Union[str, Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_lowerCAmelCase : Union[str, Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_lowerCAmelCase : str = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_lowerCAmelCase : str = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__lowercase )
_lowerCAmelCase : int = AutoformerDecoder.from_pretrained(__lowercase ).to(__lowercase )
_lowerCAmelCase : int = decoder(
trend=__lowercase , inputs_embeds=__lowercase , encoder_hidden_states=__lowercase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __A ( lowercase__ , lowercase__ , unittest.TestCase ):
_UpperCamelCase : Any = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCamelCase : Optional[int] = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCamelCase : Any = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Any = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[Any] = False
def __A ( self ):
_lowerCAmelCase : List[Any] = AutoformerModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
_lowerCAmelCase , _lowerCAmelCase : Dict = model_class.from_pretrained(__lowercase , output_loading_info=__lowercase )
self.assertEqual(info["""missing_keys"""] , [] )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__lowercase )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Tuple = inspect.signature(getattr(__lowercase , """forward""" ) )
# The main input is the name of the argument after `self`
_lowerCAmelCase : str = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __lowercase )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(__lowercase )
_lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : int = [*signature.parameters.keys()]
_lowerCAmelCase : Dict = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__lowercase )] , __lowercase )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = True
_lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , """seq_length""" , __lowercase )
_lowerCAmelCase : Dict = getattr(self.model_tester , """decoder_seq_length""" , __lowercase )
_lowerCAmelCase : Dict = getattr(self.model_tester , """encoder_seq_length""" , __lowercase )
_lowerCAmelCase : Optional[int] = getattr(self.model_tester , """d_model""" , __lowercase )
_lowerCAmelCase : str = getattr(self.model_tester , """num_attention_heads""" , __lowercase )
_lowerCAmelCase : Union[str, Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Dict = False
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Optional[Any] = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
_lowerCAmelCase : int = model(**self._prepare_for_class(__lowercase , __lowercase ) )
_lowerCAmelCase : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : str = True
_lowerCAmelCase : List[str] = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(__lowercase , __lowercase ) )
_lowerCAmelCase : List[str] = outputs.encoder_attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_lowerCAmelCase : Tuple = len(__lowercase )
_lowerCAmelCase : Optional[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__lowercase , __lowercase )
# decoder attentions
_lowerCAmelCase : str = outputs.decoder_attentions
self.assertIsInstance(__lowercase , (list, tuple) )
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_lowerCAmelCase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__lowercase , (list, tuple) )
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : str = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
_lowerCAmelCase : List[str] = model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 2 , len(__lowercase ) )
_lowerCAmelCase : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __A ( self ):
super().test_retain_grad_hidden_states_attentions()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str]="train-batch.pt" ) -> str:
_lowerCAmelCase : List[Any] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" ,filename=SCREAMING_SNAKE_CASE_ ,repo_type="""dataset""" )
_lowerCAmelCase : List[str] = torch.load(SCREAMING_SNAKE_CASE_ ,map_location=SCREAMING_SNAKE_CASE_ )
return batch
@require_torch
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__lowercase )
_lowerCAmelCase : Optional[int] = prepare_batch()
with torch.no_grad():
_lowerCAmelCase : Dict = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
_lowerCAmelCase : int = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __lowercase )
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=__lowercase )
self.assertTrue(torch.allclose(output[0, :3, :3] , __lowercase , atol=__lowercase ) )
def __A ( self ):
_lowerCAmelCase : str = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__lowercase )
_lowerCAmelCase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_lowerCAmelCase : Any = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
_lowerCAmelCase : Any = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __lowercase )
_lowerCAmelCase : Any = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=__lowercase )
self.assertTrue(torch.allclose(output[0, :3, :3] , __lowercase , atol=__lowercase ) )
def __A ( self ):
_lowerCAmelCase : Dict = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__lowercase )
_lowerCAmelCase : Any = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_lowerCAmelCase : Tuple = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
_lowerCAmelCase : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __lowercase )
_lowerCAmelCase : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=__lowercase )
_lowerCAmelCase : Union[str, Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __lowercase , rtol=1e-1 ) )
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : List[Any] = [[1, 2, 4], [1, 2, 3, 4]]
_lowerCAmelCase : Dict = DisjunctiveConstraint(A_ )
self.assertTrue(isinstance(dc.token_ids , A_ ) )
with self.assertRaises(A_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(A_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(A_ ):
DisjunctiveConstraint(A_ ) # fails here
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = [[1, 2, 3], [1, 2, 4]]
_lowerCAmelCase : str = DisjunctiveConstraint(A_ )
_lowerCAmelCase : List[Any] = dc.update(1 )
_lowerCAmelCase : Dict = stepped is True and completed is False and reset is False
self.assertTrue(A_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase : List[str] = dc.update(2 )
_lowerCAmelCase : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(A_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase : Dict = dc.update(3 )
_lowerCAmelCase : Optional[int] = stepped is True and completed is True and reset is False
self.assertTrue(A_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __A ( self ):
_lowerCAmelCase : Tuple = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_lowerCAmelCase : Any = DisjunctiveConstraint(A_ )
_lowerCAmelCase : List[str] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase : Optional[int] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_lowerCAmelCase : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_lowerCAmelCase : Union[str, Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase : Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
import numpy as np
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.array ) -> List[str]:
return 1 / (1 + np.exp(-vector ))
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.array ) -> Tuple:
return vector * sigmoid(1.7_02 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
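# The second function above is the sigmoid approximation of GELU sometimes called
# "quick GELU": x * sigmoid(1.702 * x). A tiny runnable sketch (the name and the sample
# values are illustrative):
import numpy as np
def quick_gelu_sketch(x):
    return x * (1 / (1 + np.exp(-1.702 * x)))
print(quick_gelu_sketch(np.array([-1.0, 0.0, 1.0])))  # roughly [-0.154, 0.0, 0.846]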
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Optional[int] ) -> Dict:
if discount_rate < 0:
raise ValueError("""Discount rate cannot be negative""" )
if not cash_flows:
raise ValueError("""Cash flows list cannot be empty""" )
_lowerCAmelCase : Optional[int] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(__lowerCAmelCase ) )
return round(__lowerCAmelCase ,ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
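# The function above discounts each cash flow by (1 + rate) ** i, where i is its index in
# the list, and rounds the sum to two decimals. A self-contained sketch with an
# illustrative example (the figures are made up for demonstration):
def npv_sketch(discount_rate, cash_flows):
    return round(sum(cf / (1 + discount_rate) ** i for i, cf in enumerate(cash_flows)), ndigits=2)
assert npv_sketch(0.1, [-1000.0, 500.0, 500.0, 500.0]) == 243.43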
| 718 |
"""simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> List[Any]:
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCAmelCase : List[str] = subprocess.run(_lowerCamelCase ,shell=_lowerCamelCase ,stdout=subprocess.PIPE )
_lowerCAmelCase : int = output.stdout.decode("""utf-8""" )
_lowerCAmelCase : Tuple = json.loads(_lowerCamelCase )
_lowerCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" ,"""w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 0 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str = 3 ) -> Optional[Any]:
if isinstance(lowercase__ ,lowercase__ ):
        raise TypeError("""number of qubits must be an integer.""" )
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""" )
if math.floor(lowercase__ ) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""" )
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate(>10).""" )
_lowerCAmelCase : Dict = QuantumRegister(lowercase__ ,"""qr""" )
_lowerCAmelCase : Any = ClassicalRegister(lowercase__ ,"""cr""" )
_lowerCAmelCase : Dict = QuantumCircuit(lowercase__ ,lowercase__ )
_lowerCAmelCase : Optional[Any] = number_of_qubits
for i in range(lowercase__ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(lowercase__ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) ,lowercase__ ,lowercase__ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(lowercase__ ,number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(lowercase__ ,lowercase__ )
# simulate with 10000 shots
_lowerCAmelCase : List[Any] = Aer.get_backend("""qasm_simulator""" )
_lowerCAmelCase : Tuple = execute(lowercase__ ,lowercase__ ,shots=10000 )
return job.result().get_counts(lowercase__ )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
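# As a classical cross-check, the circuit above implements (up to qubit-ordering
# conventions) the unitary discrete Fourier transform on 2**n amplitudes. A small NumPy
# sketch of that matrix together with a unitarity check (n = 3 is illustrative):
import numpy as np
n = 3
N = 2**n
qft_matrix = np.array([[np.exp(2j * np.pi * j * k / N) for k in range(N)] for j in range(N)]) / np.sqrt(N)
assert np.allclose(qft_matrix @ qft_matrix.conj().T, np.eye(N))  # the QFT matrix is unitary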
| 719 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class __A ( SchedulerMixin , ConfigMixin ):
_UpperCamelCase : Any = 2
@register_to_config
def __init__( self , a__ = 0.0_2 , a__ = 100 , a__ = 1.0_0_7 , a__ = 80 , a__ = 0.0_5 , a__ = 50 , ):
# standard deviation of the initial noise distribution
_lowerCAmelCase : Tuple = sigma_max
# setable values
_lowerCAmelCase : int = None
_lowerCAmelCase : np.IntTensor = None
_lowerCAmelCase : torch.FloatTensor = None # sigma(t_i)
def __A ( self , a__ , a__ = None ):
return sample
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
_lowerCAmelCase : Union[str, Any] = np.arange(0 , self.num_inference_steps )[::-1].copy()
_lowerCAmelCase : Dict = torch.from_numpy(__a ).to(__a )
_lowerCAmelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_lowerCAmelCase : List[Any] = torch.tensor(__a , dtype=torch.floataa , device=__a )
def __A ( self , a__ , a__ , a__ = None ):
if self.config.s_min <= sigma <= self.config.s_max:
_lowerCAmelCase : str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_lowerCAmelCase : Optional[Any] = 0
# sample eps ~ N(0, S_noise^2 * I)
_lowerCAmelCase : str = self.config.s_noise * randn_tensor(sample.shape , generator=__a ).to(sample.device )
_lowerCAmelCase : str = sigma + gamma * sigma
_lowerCAmelCase : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __A ( self , a__ , a__ , a__ , a__ , a__ = True , ):
_lowerCAmelCase : List[str] = sample_hat + sigma_hat * model_output
_lowerCAmelCase : Tuple = (sample_hat - pred_original_sample) / sigma_hat
_lowerCAmelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a , derivative=__a , pred_original_sample=__a )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ = True , ):
_lowerCAmelCase : List[str] = sample_prev + sigma_prev * model_output
_lowerCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
_lowerCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a , derivative=__a , pred_original_sample=__a )
def __A ( self , a__ , a__ , a__ ):
raise NotImplementedError()
| 720 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    # Sieve of Euler totients: phi[i] starts at i - 1 and is corrected once per prime factor of i.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
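    # Added sanity check (not part of the original Project Euler solution): for a small limit the
    # totient sieve above should agree with a direct count of coprime pairs via math.gcd.
    from math import gcd

    assert solution(100) == sum(sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, 101))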
| 663 | 0 |
"""simple docstring"""
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
_a : str = float('nan')
class __A :
def __init__( self , a__ ):
_lowerCAmelCase : Union[str, Any] = sys.stdout
_lowerCAmelCase : List[Any] = open(UpperCAmelCase__ , """a""" )
def __getattr__( self , a__ ):
return getattr(self.stdout , UpperCAmelCase__ )
def __A ( self , a__ ):
self.stdout.write(UpperCAmelCase__ )
# strip tqdm codes
self.file.write(re.sub(r"""^.*\r""" , """""" , UpperCAmelCase__ , 0 , re.M ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str]=80 ,_lowerCamelCase : Dict=False ) -> Tuple:
_lowerCAmelCase : Optional[int] = []
# deal with critical env vars
_lowerCAmelCase : Optional[Any] = ["""CUDA_VISIBLE_DEVICES"""]
for key in env_keys:
_lowerCAmelCase : Union[str, Any] = os.environ.get(_UpperCamelCase ,_UpperCamelCase )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
_lowerCAmelCase : List[Any] = sys.executable if full_python_path else sys.executable.split("""/""" )[-1]
cmd.append(_UpperCamelCase )
# now the normal args
cmd += list(map(shlex.quote ,sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[int] = """"""
while len(_UpperCamelCase ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(_UpperCamelCase ) == 0 or len(_UpperCamelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCamelCase )
_lowerCAmelCase : str = """"""
return "\\\n".join(_UpperCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ) -> Tuple:
_lowerCAmelCase : List[str] = re.sub(r"""[\\\n]+""" ,""" """ ,args.base_cmd )
# remove --output_dir if any and set our own
_lowerCAmelCase : Optional[Any] = re.sub("""--output_dir\s+[^\s]+""" ,"""""" ,args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
_lowerCAmelCase : Dict = re.sub("""--overwrite_output_dir\s+""" ,"""""" ,args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : List[str] ,_lowerCamelCase : Tuple ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ) -> Optional[Any]:
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 ,100 ) for k in metric_keys} ,**{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} ,)
_lowerCAmelCase : Dict = subprocess.run(_UpperCamelCase ,capture_output=_UpperCamelCase ,text=_UpperCamelCase )
if verbose:
print("""STDOUT""" ,result.stdout )
print("""STDERR""" ,result.stderr )
# save the streams
_lowerCAmelCase : List[str] = variation.replace(""" """ ,"""-""" )
with open(Path(_UpperCamelCase ) / f"log.{prefix}.stdout.txt" ,"""w""" ) as f:
f.write(result.stdout )
with open(Path(_UpperCamelCase ) / f"log.{prefix}.stderr.txt" ,"""w""" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("""failed""" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" ,"""r""" ,encoding="""utf-8""" ) as f:
_lowerCAmelCase : List[Any] = json.load(_UpperCamelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ,_lowerCamelCase : Dict ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : List[str] ,_lowerCamelCase : Dict ,) -> str:
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Tuple = f"{id}: {variation:<{longest_variation_len}}"
_lowerCAmelCase : Optional[int] = f"{preamble}: "
_lowerCAmelCase : Dict = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCamelCase ) ,desc=_UpperCamelCase ,leave=_UpperCamelCase ):
_lowerCAmelCase : List[str] = process_run_single(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
_lowerCAmelCase : Tuple = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCamelCase ):
metrics.append(_UpperCamelCase )
results.append(_UpperCamelCase )
outcome += "✓"
else:
outcome += "✘"
_lowerCAmelCase : Union[str, Any] = f"\33[2K\r{outcome}"
if len(_UpperCamelCase ) > 0:
_lowerCAmelCase : List[Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_lowerCAmelCase : List[str] = round(mean_metrics[target_metric_key] ,2 )
_lowerCAmelCase : Optional[int] = f"{outcome} {mean_target}"
if len(_UpperCamelCase ) > 1:
results_str += f" {tuple(round(_UpperCamelCase ,2 ) for x in results )}"
print(_UpperCamelCase )
_lowerCAmelCase : Union[str, Any] = variation
return mean_metrics
else:
print(_UpperCamelCase )
return {variation_key: variation, target_metric_key: nan}
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Any = torch.cuda.get_device_properties(torch.device("""cuda""" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Tuple ,_lowerCamelCase : Dict ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ) -> Optional[Any]:
_lowerCAmelCase : Dict = pd.DataFrame(_UpperCamelCase )
_lowerCAmelCase : List[Any] = """variation"""
_lowerCAmelCase : str = """diff_%"""
_lowerCAmelCase : Any = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_lowerCAmelCase : List[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCamelCase ):
# as a fallback, use the minimal value as the sentinel
_lowerCAmelCase : Union[str, Any] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCamelCase ):
_lowerCAmelCase : Dict = df.apply(
lambda _lowerCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 ,axis="""columns""" ,)
# re-order columns
_lowerCAmelCase : List[Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_lowerCAmelCase : int = df.reindex(_UpperCamelCase ,axis="""columns""" ) # reorder cols
# capitalize
_lowerCAmelCase : Any = df.rename(str.capitalize ,axis="""columns""" )
# make the cols as narrow as possible
_lowerCAmelCase : List[str] = df.rename(lambda _lowerCamelCase : c.replace("""_""" ,"""<br>""" ) ,axis="""columns""" )
_lowerCAmelCase : Tuple = df.rename(lambda _lowerCamelCase : c.replace("""_""" ,"""\n""" ) ,axis="""columns""" )
_lowerCAmelCase : Optional[int] = ["""""", """Copy between the cut-here-lines and paste as is to github or a forum"""]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCamelCase ,floatfmt=""".2f""" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCamelCase ,floatfmt=""".2f""" )]
print("""\n\n""".join(_UpperCamelCase ) )
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
"""--base-cmd""" ,default=_UpperCamelCase ,type=_UpperCamelCase ,required=_UpperCamelCase ,help="""Base cmd""" ,)
parser.add_argument(
"""--variations""" ,default=_UpperCamelCase ,type=_UpperCamelCase ,nargs="""+""" ,required=_UpperCamelCase ,help="""Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'""" ,)
parser.add_argument(
"""--base-variation""" ,default=_UpperCamelCase ,type=_UpperCamelCase ,help="""Baseline variation to compare to. if None the minimal target value will be used to compare against""" ,)
parser.add_argument(
"""--target-metric-key""" ,default=_UpperCamelCase ,type=_UpperCamelCase ,required=_UpperCamelCase ,help="""Target metric key in output_dir/all_results.json, e.g., train_samples_per_second""" ,)
parser.add_argument(
"""--report-metric-keys""" ,default="""""" ,type=_UpperCamelCase ,help="""Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples""" ,)
parser.add_argument(
"""--repeat-times""" ,default=1 ,type=_UpperCamelCase ,help="""How many times to re-run each variation - an average will be reported""" ,)
parser.add_argument(
"""--output_dir""" ,default="""output_benchmark""" ,type=_UpperCamelCase ,help="""The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked""" ,)
parser.add_argument(
"""--verbose""" ,default=_UpperCamelCase ,action="""store_true""" ,help="""Whether to show the outputs of each run or just the benchmark progress""" ,)
_lowerCAmelCase : int = parser.parse_args()
_lowerCAmelCase : int = args.output_dir
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
_lowerCAmelCase : Union[str, Any] = get_base_command(_UpperCamelCase ,_UpperCamelCase )
# split each dimension into its --foo variations
_lowerCAmelCase : Union[str, Any] = [list(map(str.strip ,re.split(r"""\|""" ,_UpperCamelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_lowerCAmelCase : Optional[Any] = list(map(str.strip ,map(""" """.join ,itertools.product(*_UpperCamelCase ) ) ) )
_lowerCAmelCase : str = max(len(_UpperCamelCase ) for x in variations )
# split wanted keys
_lowerCAmelCase : Optional[int] = args.report_metric_keys.split()
# capture prints into a log file for convenience
_lowerCAmelCase : Any = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(f"and this script's output is also piped into {report_fn}" )
_lowerCAmelCase : int = Tee(_UpperCamelCase )
print(f"\n*** Running {len(_UpperCamelCase )} benchmarks:" )
print(f"Base command: {' '.join(_UpperCamelCase )}" )
_lowerCAmelCase : List[Any] = """variation"""
_lowerCAmelCase : List[Any] = []
for id, variation in enumerate(tqdm(_UpperCamelCase ,desc="""Total completion: """ ,leave=_UpperCamelCase ) ):
_lowerCAmelCase : Tuple = base_cmd + variation.split()
results.append(
process_run(
id + 1 ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,args.target_metric_key ,_UpperCamelCase ,args.repeat_times ,_UpperCamelCase ,args.verbose ,) )
process_results(_UpperCamelCase ,args.target_metric_key ,_UpperCamelCase ,args.base_variation ,_UpperCamelCase )
if __name__ == "__main__":
main()
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
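    # Added note: installing a _LazyModule in sys.modules means the heavy torch imports in
    # modeling_wavlm only run when one of the exported names is actually accessed, while the
    # TYPE_CHECKING branch above keeps static type checkers aware of the real symbols.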
| 663 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a : Dict = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Optional[Any]=False ) -> Tuple:
_lowerCAmelCase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Dict ,_lowerCamelCase : Optional[Any]=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase : Tuple = """"""
else:
_lowerCAmelCase : Tuple = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : Optional[int] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
_lowerCAmelCase : Optional[int] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase : Any = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : str = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Optional[int]:
_lowerCAmelCase : Dict = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Dict ) -> List[Any]:
_lowerCAmelCase : Optional[Any] = dct.pop(_lowerCamelCase )
_lowerCAmelCase : List[str] = val
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase : Optional[int] = Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : int ,_lowerCamelCase : List[str]=True ) -> str:
_lowerCAmelCase : str = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCAmelCase : int = 8
# set labels if required
if not base_model:
_lowerCAmelCase : int = 1000
_lowerCAmelCase : List[str] = """huggingface/label-files"""
_lowerCAmelCase : List[str] = """imagenet-1k-id2label.json"""
_lowerCAmelCase : Any = json.load(open(hf_hub_download(_lowerCamelCase ,_lowerCamelCase ,repo_type="""dataset""" ) ,"""r""" ) )
_lowerCAmelCase : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : List[Any] = idalabel
_lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCAmelCase : Optional[int] = 384
_lowerCAmelCase : List[Any] = 1536
_lowerCAmelCase : List[str] = 12
_lowerCAmelCase : Tuple = 6
# load original model from torch hub
_lowerCAmelCase : List[Any] = torch.hub.load("""facebookresearch/dino:main""" ,_lowerCamelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase : Dict = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_lowerCAmelCase : str = create_rename_keys(_lowerCamelCase ,base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
read_in_q_k_v(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# load HuggingFace model
if base_model:
_lowerCAmelCase : Dict = ViTModel(_lowerCamelCase ,add_pooling_layer=_lowerCamelCase ).eval()
else:
_lowerCAmelCase : Any = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCAmelCase : Dict = ViTImageProcessor()
_lowerCAmelCase : List[Any] = image_processor(images=prepare_img() ,return_tensors="""pt""" )
_lowerCAmelCase : List[str] = encoding["""pixel_values"""]
_lowerCAmelCase : Any = model(_lowerCamelCase )
if base_model:
_lowerCAmelCase : Union[str, Any] = original_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase ,outputs.last_hidden_state[:, 0, :] ,atol=1e-1 )
else:
_lowerCAmelCase : Optional[Any] = original_model(_lowerCamelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase ,outputs.logits ,atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
_a : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 700 |
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    # Standard contrast-correction factor for 8-bit channels.
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))
    return img.point(contrast)
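# Added note: `level` is expected in [-255, 255]; level 0 gives factor 1 (no change), positive
# levels stretch pixel values away from the midpoint 128, and negative levels flatten them
# toward it.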
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ''
_UpperCamelCase : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCamelCase : str = None # compression type in fsspec. ex: "gzip"
_UpperCamelCase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , a__ = "" , a__ = None , a__ = None , **a__ ):
super().__init__(self , **a__ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_lowerCAmelCase : Any = fsspec.open(
a__ , mode="""rb""" , protocol=a__ , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
_lowerCAmelCase : Tuple = os.path.basename(self.file.path.split("""::""" )[0] )
_lowerCAmelCase : int = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
_lowerCAmelCase : int = None
@classmethod
def __A ( cls , a__ ):
return super()._strip_protocol(a__ ).lstrip("""/""" )
def __A ( self ):
if self.dir_cache is None:
_lowerCAmelCase : Optional[Any] = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
_lowerCAmelCase : int = {f["""name"""]: f}
def __A ( self , a__ ):
return self.file.open().read()
def __A ( self , a__ , a__ = "rb" , a__=None , a__=True , a__=None , **a__ , ):
_lowerCAmelCase : List[Any] = self._strip_protocol(a__ )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = 'bz2'
_UpperCamelCase : Union[str, Any] = 'bz2'
_UpperCamelCase : Union[str, Any] = '.bz2'
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = 'gzip'
_UpperCamelCase : Dict = 'gzip'
_UpperCamelCase : int = '.gz'
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = 'lz4'
_UpperCamelCase : Optional[int] = 'lz4'
_UpperCamelCase : List[str] = '.lz4'
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = 'xz'
_UpperCamelCase : List[Any] = 'xz'
_UpperCamelCase : Tuple = '.xz'
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = 'zstd'
_UpperCamelCase : List[str] = 'zstd'
_UpperCamelCase : Union[str, Any] = '.zst'
def __init__( self , a__ , a__ = "rb" , a__ = None , a__ = None , a__ = DEFAULT_BLOCK_SIZE , **a__ , ):
super().__init__(
fo=a__ , mode=a__ , target_protocol=a__ , target_options=a__ , block_size=a__ , **a__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_lowerCAmelCase : Tuple = self.file.__enter__
class __A :
def __init__( self , a__ ):
_lowerCAmelCase : int = file_
def __enter__( self ):
self._file.__enter__()
return self
def __exit__( self , *a__ , **a__ ):
self._file.__exit__(*a__ , **a__ )
def __iter__( self ):
return iter(self._file )
def __A ( self ):
return next(self._file )
def __getattr__( self , a__ ):
return getattr(self._file , a__ )
def fixed_enter(*a__ , **a__ ):
return WrappedFile(_enter(*a__ , **a__ ) )
_lowerCAmelCase : List[str] = fixed_enter
| 701 |
"""simple docstring"""
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("""Maximum queue size is 100""")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("""Valid priorities are 0, 1, and 2""")
    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("""All queues are empty""")
    def __str__(self):
        return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self):
        self.queue = []
    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("""Maximum queue size is 100""")
        self.queue.append(data)
    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("""The queue is empty""")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
    def __str__(self):
        return str(self.queue)
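# Added note: FixedPriorityQueue serves priority 0 before 1 before 2 and is FIFO within a
# priority, while ElementPriorityQueue treats the smallest stored value itself as the highest
# priority; its dequeue is O(n) because of the min() scan followed by remove().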
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a : Tuple = logging.get_logger(__name__)
class __A ( __snake_case ):
_UpperCamelCase : str = ["pixel_values"]
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BICUBIC , a__ = True , a__ = None , a__ = True , a__ = 1 / 255 , a__ = True , a__ = IMAGENET_DEFAULT_MEAN , a__ = IMAGENET_DEFAULT_STD , **a__ , ):
super().__init__(**__UpperCamelCase )
_lowerCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224}
_lowerCAmelCase : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : int = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
_lowerCAmelCase : str = do_resize
_lowerCAmelCase : Dict = size
_lowerCAmelCase : Optional[int] = resample
_lowerCAmelCase : Any = do_center_crop
_lowerCAmelCase : List[str] = crop_size
_lowerCAmelCase : List[str] = do_rescale
_lowerCAmelCase : Optional[Any] = rescale_factor
_lowerCAmelCase : List[Any] = do_normalize
_lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __A ( self , a__ , a__ , a__ = PILImageResampling.BICUBIC , a__ = None , **a__ , ):
_lowerCAmelCase : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowerCAmelCase : str = int((256 / 224) * size["""shortest_edge"""] )
_lowerCAmelCase : Tuple = get_resize_output_image_size(__UpperCamelCase , size=__UpperCamelCase , default_to_square=__UpperCamelCase )
_lowerCAmelCase : Dict = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
__UpperCamelCase , size=(size_dict["""height"""], size_dict["""width"""]) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
_lowerCAmelCase : Dict = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __A ( self , a__ , a__ , a__ , a__ = None , **a__ , ):
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : int = resample if resample is not None else self.resample
_lowerCAmelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Dict = size if size is not None else self.size
_lowerCAmelCase : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Dict = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
_lowerCAmelCase : Union[str, Any] = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : List[Any] = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_lowerCAmelCase : Optional[int] = [self.resize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images]
if do_center_crop:
_lowerCAmelCase : str = [self.center_crop(__UpperCamelCase , __UpperCamelCase ) for image in images]
if do_rescale:
_lowerCAmelCase : List[str] = [self.rescale(__UpperCamelCase , __UpperCamelCase ) for image in images]
if do_normalize:
_lowerCAmelCase : Any = [self.normalize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images]
_lowerCAmelCase : List[Any] = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_lowerCAmelCase : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
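# Added note: the preprocessing order implemented above is resize -> center crop -> rescale ->
# normalize, and the result is wrapped in a BatchFeature under the "pixel_values" key.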
| 702 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_a : List[str] = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DeiTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args, **kwargs)
| 703 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
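# Added note: with the default scale_factor of 8, a 768x768 request maps to a 96x96 latent grid
# (768 // 8**2 = 12, then 12 * 8 = 96); sizes that are not multiples of scale_factor**2 are
# rounded up before rescaling.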
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("""RGB"""))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    '''simple docstring'''
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : List[str] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class __A ( __SCREAMING_SNAKE_CASE ):
_UpperCamelCase : str = "camembert"
def __init__( self , a__=30522 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , a__=None , **a__ , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = max_position_embeddings
_lowerCAmelCase : Optional[int] = type_vocab_size
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Tuple = position_embedding_type
_lowerCAmelCase : Tuple = use_cache
_lowerCAmelCase : Dict = classifier_dropout
class __A ( __SCREAMING_SNAKE_CASE ):
@property
def __A ( self ):
if self.task == "multiple-choice":
_lowerCAmelCase : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_lowerCAmelCase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
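        # For any task other than multiple-choice, the mapping above resolves to
        # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"})]),
        # which tells the ONNX exporter that both the batch and sequence dimensions are dynamic.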
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["torch", "transformers", "onnx"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["torch", "transformers", "onnx"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["torch", "transformers", "onnx"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["torch", "transformers", "onnx"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["torch", "transformers", "onnx"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 706 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
import requests
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> dict:
_lowerCAmelCase : List[Any] = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
return requests.get(lowercase_ ).json()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] = 10 ) -> list[dict]:
_lowerCAmelCase : Optional[int] = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
_lowerCAmelCase : List[Any] = requests.get(lowercase_ ).json()[:max_stories]
return [get_hackernews_story(lowercase_ ) for story_id in story_ids]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str = 10 ) -> str:
_lowerCAmelCase : List[Any] = hackernews_top_stories(lowercase_ )
return "\n".join("""* [{title}]({url})""".format(**lowercase_ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> np.ndarray:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
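# Worked example of the luminance formula: a pure-red pixel (255, 0, 0) maps to about
# 0.2989 * 255 = 76.2, while white (255, 255, 255) maps to (0.2989 + 0.5870 + 0.1140) * 255, i.e. about 255.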
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Union[str, Any] ) -> np.ndarray:
_lowerCAmelCase : Optional[Any] = np.zeros_like(__UpperCamelCase )
_lowerCAmelCase : Union[str, Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
_lowerCAmelCase : str = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
_lowerCAmelCase : int = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
_lowerCAmelCase : Union[str, Any] = int(summation > 0 )
return output
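# Conceptual example with made-up data: dilating the single-pixel binary image
#   [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
# with the cross-shaped structuring element [[0, 1, 0], [1, 1, 1], [0, 1, 0]] switches on
# every cell the kernel can reach from the centre pixel, giving
#   [[0, 1, 0], [1, 1, 1], [0, 1, 0]]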
if __name__ == "__main__":
# read original image
_a : List[Any] = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
_a : Optional[Any] = np.array(Image.open(lena_path))
# kernel to be applied
_a : Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_a : Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_a : List[Any] = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
| 708 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool:
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
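# Worked example: 28 is perfect because its proper divisors sum to it (1 + 2 + 4 + 7 + 14 = 28),
# whereas 12 is not (1 + 2 + 3 + 4 + 6 = 16).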
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a : int = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_a : Optional[Any] = 16
_a : Tuple = 32
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Optional[Any] = 16 ) -> str:
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_lowerCAmelCase : Union[str, Any] = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(_lowerCamelCase : List[str] ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : Any = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_lowerCamelCase ,max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCAmelCase : List[Any] = datasets.map(
_lowerCamelCase ,batched=_lowerCamelCase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : int = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(_lowerCamelCase : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase : Dict = 16
elif accelerator.mixed_precision != "no":
_lowerCAmelCase : List[str] = 8
else:
_lowerCAmelCase : Union[str, Any] = None
return tokenizer.pad(
_lowerCamelCase ,padding="""longest""" ,max_length=_lowerCamelCase ,pad_to_multiple_of=_lowerCamelCase ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_lowerCAmelCase : Tuple = DataLoader(
tokenized_datasets["""train"""] ,shuffle=_lowerCamelCase ,collate_fn=_lowerCamelCase ,batch_size=_lowerCamelCase )
_lowerCAmelCase : int = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=_lowerCamelCase ,collate_fn=_lowerCamelCase ,batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_a : Optional[int] = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[str] ) -> Dict:
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,_lowerCamelCase ) == "1":
_lowerCAmelCase : List[Any] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
_lowerCAmelCase : List[str] = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with="""all""" ,project_dir=args.project_dir )
else:
_lowerCAmelCase : List[Any] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : Any = config["""lr"""]
_lowerCAmelCase : Dict = int(config["""num_epochs"""] )
_lowerCAmelCase : List[Any] = int(config["""seed"""] )
_lowerCAmelCase : Dict = int(config["""batch_size"""] )
set_seed(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = get_dataloaders(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : List[str] = evaluate.load("""glue""" ,"""mrpc""" )
# If the batch size is too big we use gradient accumulation
_lowerCAmelCase : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_lowerCAmelCase : Tuple = batch_size // MAX_GPU_BATCH_SIZE
_lowerCAmelCase : Tuple = MAX_GPU_BATCH_SIZE
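        # e.g. a configured batch_size of 64 turns into gradient_accumulation_steps = 64 // 16 = 4
        # with an effective per-step batch size of MAX_GPU_BATCH_SIZE = 16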
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : List[str] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=_lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase : Any = model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase : Tuple = AdamW(params=model.parameters() ,lr=_lowerCamelCase )
# Instantiate scheduler
_lowerCAmelCase : int = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase ,num_warmup_steps=100 ,num_training_steps=(len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = accelerator.prepare(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
_lowerCAmelCase : str = os.path.split(_lowerCamelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(_lowerCamelCase ,_lowerCamelCase )
# Now we train the model
for epoch in range(_lowerCamelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
_lowerCAmelCase : Any = 0
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCAmelCase : Dict = model(**_lowerCamelCase )
_lowerCAmelCase : int = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
_lowerCAmelCase : Tuple = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**_lowerCamelCase )
_lowerCAmelCase : Tuple = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_lowerCamelCase ,references=_lowerCamelCase ,)
_lowerCAmelCase : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" ,_lowerCamelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(_lowerCamelCase ),
"""epoch""": epoch,
} ,step=_lowerCamelCase ,)
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" ,action="""store_true""" ,help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" ,)
parser.add_argument(
"""--project_dir""" ,type=_lowerCamelCase ,default="""logs""" ,help="""Location on where to store experiment tracking logs` and relevent project information""" ,)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
_lowerCAmelCase : Optional[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 709 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __A :
_UpperCamelCase : int
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def SCREAMING_SNAKE_CASE ( ) -> Node | None:
_lowerCAmelCase : Tuple = Node(1 )
_lowerCAmelCase : int = Node(2 )
_lowerCAmelCase : int = Node(3 )
_lowerCAmelCase : Any = Node(4 )
_lowerCAmelCase : Dict = Node(5 )
return tree
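# Assuming the five nodes above are linked as 1 -> (2, 3) and 2 -> (4, 5), the traversals
# defined below yield: preorder [1, 2, 4, 5, 3], inorder [4, 2, 5, 1, 3],
# postorder [4, 5, 2, 3, 1], level order [1, 2, 3, 4, 5] and zigzag [[1], [3, 2], [4, 5]].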
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> int:
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
if root is None:
return output
_lowerCAmelCase : Union[str, Any] = deque([root] )
while process_queue:
_lowerCAmelCase : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
_lowerCAmelCase : list[Sequence[Node | None]] = []
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = height(_lowerCamelCase )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Optional[int] = 0
return output
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
_lowerCAmelCase : int = make_tree()
print(f"In-order Traversal: {inorder(_lowerCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_lowerCamelCase )}" )
print(f"Post-order Traversal: {postorder(_lowerCamelCase )}" ,"""\n""" )
print(f"Height of Tree: {height(_lowerCamelCase )}" ,"""\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) ,"""\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 ,height(_lowerCamelCase ) + 1 ):
print(f"Level {level}:" ,get_nodes_from_left_to_right(_lowerCamelCase ,level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 663 | 0 |
"""simple docstring"""
_a : Optional[int] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Optional[Any] ) -> int:
    # Breadth-first search over the residual graph: returns True if the sink `t` is reachable
    # from the source `s`, filling `parent` with the discovered augmenting path.
_lowerCAmelCase : str = [False] * len(snake_case_ )
_lowerCAmelCase : Optional[int] = [s]
_lowerCAmelCase : str = True
while queue:
_lowerCAmelCase : Dict = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(snake_case_ )
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Tuple = u
return visited[t]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> Union[str, Any]:
_lowerCAmelCase : List[str] = [-1] * (len(snake_case_ ))
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Any = [i[:] for i in graph] # Record original cut, copy.
while bfs(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ):
_lowerCAmelCase : Optional[int] = float("""Inf""" )
_lowerCAmelCase : Dict = sink
while s != source:
# Find the minimum value in select path
_lowerCAmelCase : Any = min(snake_case_ ,graph[parent[s]][s] )
_lowerCAmelCase : Tuple = parent[s]
max_flow += path_flow
_lowerCAmelCase : Tuple = sink
while v != source:
_lowerCAmelCase : Optional[int] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_lowerCAmelCase : Union[str, Any] = parent[v]
for i in range(len(snake_case_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
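# `res` collects the saturated edges (u, v): positive-capacity edges of the original graph whose
# residual capacity dropped to zero during the max-flow computation, which this implementation
# reports as the minimum cut.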
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 710 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 663 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __A :
def __init__( self , a__ , a__=13 , a__=2 , a__=24 , a__=16 , a__=True , a__=True , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.0_2 , a__=None , a__=2 , a__=2 , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : int = max_length
_lowerCAmelCase : Tuple = num_mel_bins
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Dict = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : List[str] = scope
_lowerCAmelCase : List[Any] = frequency_stride
_lowerCAmelCase : str = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCAmelCase : str = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCAmelCase : int = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCAmelCase : Any = frequency_out_dimension * time_out_dimension
_lowerCAmelCase : Dict = num_patches + 2
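        # With the defaults above (num_mel_bins=16, max_length=24, patch_size=2, both strides=2) this gives
        # frequency_out_dimension = (16 - 2) // 2 + 1 = 8 and time_out_dimension = (24 - 2) // 2 + 1 = 12,
        # so num_patches = 8 * 12 = 96 and seq_length = 96 + 2 = 98.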
def __A ( self ):
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : List[Any] = self.get_config()
return config, input_values, labels
def __A ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = ASTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self ):
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
        _lowerCAmelCase : str = config_and_inputs
_lowerCAmelCase : Optional[int] = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase : List[str] = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
_UpperCamelCase : Any = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Union[str, Any] = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = ASTModelTester(self )
_lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(UpperCamelCase__ )
_lowerCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Dict = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ['''input_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __A ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@slow
def __A ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = ASTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
_lowerCAmelCase : Tuple = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" ,filename="""sample_audio.flac""" ,repo_type="""dataset""" )
_lowerCAmelCase : List[Any] = torchaudio.load(__UpperCamelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.default_feature_extractor
_lowerCAmelCase : Optional[Any] = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(UpperCamelCase__ )
_lowerCAmelCase : str = self.default_feature_extractor
_lowerCAmelCase : Optional[int] = prepare_audio()
_lowerCAmelCase : Optional[Any] = audio.squeeze().numpy()
_lowerCAmelCase : Any = feature_extractor(UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**UpperCamelCase__ )
# verify the logits
_lowerCAmelCase : Optional[int] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
_lowerCAmelCase : str = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 711 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
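# Hypothetical usage sketch (data invented for illustration; relies on the TF1-style Session API used above):
# vectors = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [8.2, 7.9]]
# centroids, assignments = SCREAMING_SNAKE_CASE(vectors, 2)
# Two well-separated clusters should emerge, with centroids near (1.1, 0.9) and (8.1, 7.95)
# and assignments grouping the first two and the last two vectors together.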
| 663 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
_a : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
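# Example of the renaming performed below: the fairseq parameter "w2v_model.layer_norm.weight"
# matches the "w2v_model.layer_norm" entry of MAPPING and, since the mapped key is not in
# TOP_LEVEL_KEYS, is stored as "unispeech_sat.feature_projection.layer_norm.weight" in the HF model.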
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Dict ) -> List[str]:
for attribute in key.split(""".""" ):
_lowerCAmelCase : Tuple = getattr(lowerCamelCase_ ,lowerCamelCase_ )
if weight_type is not None:
_lowerCAmelCase : Union[str, Any] = getattr(lowerCamelCase_ ,lowerCamelCase_ ).shape
else:
_lowerCAmelCase : Tuple = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
_lowerCAmelCase : Dict = value
elif weight_type == "weight_g":
_lowerCAmelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCAmelCase : Tuple = value
elif weight_type == "bias":
_lowerCAmelCase : List[Any] = value
else:
_lowerCAmelCase : List[Any] = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Dict ) -> Dict:
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCAmelCase : List[str] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,hf_model.config.feat_extract_norm == """group""" ,)
_lowerCAmelCase : List[str] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCAmelCase : str = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
_lowerCAmelCase : int = True
if "*" in mapped_key:
_lowerCAmelCase : Tuple = name.split(lowerCamelCase_ )[0].split(""".""" )[-2]
_lowerCAmelCase : int = mapped_key.replace("""*""" ,lowerCamelCase_ )
if "weight_g" in name:
_lowerCAmelCase : str = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase : Optional[Any] = """weight_v"""
elif "bias" in name:
_lowerCAmelCase : List[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase : Dict = """weight"""
else:
_lowerCAmelCase : Tuple = None
set_recursively(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
continue
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(f"Unused weights: {unused_weights}" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : Optional[int] ) -> Dict:
_lowerCAmelCase : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase : List[str] = name.split(""".""" )
_lowerCAmelCase : Tuple = int(items[0] )
_lowerCAmelCase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_lowerCAmelCase : str = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_lowerCAmelCase : Union[str, Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
_lowerCAmelCase : Optional[int] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
_lowerCAmelCase : List[Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCamelCase_ )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path ,pytorch_dump_folder_path ,config_path=None ,dict_path=None ,is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = """"""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model ,hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
_a : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 712 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_a : Union[str, Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_a : Dict = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : List[str] = calculate_rouge(__snake_case ,__snake_case ,bootstrap_aggregation=__snake_case ,rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(__snake_case ,__snake_case )
_lowerCAmelCase : Optional[int] = calculate_rouge(__snake_case ,__snake_case ,bootstrap_aggregation=__snake_case ,rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : List[Any] = "rougeLsum"
_lowerCAmelCase : Optional[Any] = calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ,rouge_keys=[k] )[k]
_lowerCAmelCase : Optional[Any] = calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ,rouge_keys=[k] )[k]
assert score > score_no_sep
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : str = ["rouge1", "rouge2", "rougeL"]
_lowerCAmelCase : Optional[Any] = calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ,rouge_keys=__snake_case )
_lowerCAmelCase : Union[str, Any] = calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ,rouge_keys=__snake_case )
assert score_sep == score_no_sep
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_lowerCAmelCase : List[str] = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
_lowerCAmelCase : Optional[Any] = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
assert calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ) == calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
_lowerCAmelCase : Optional[Any] = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
_lowerCAmelCase : Optional[int] = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
_lowerCAmelCase : Any = calculate_rouge(__snake_case ,__snake_case ,rouge_keys=["""rougeLsum"""] ,newline_sep=__snake_case )["rougeLsum"]
_lowerCAmelCase : Union[str, Any] = calculate_rouge(__snake_case ,__snake_case ,rouge_keys=["""rougeLsum"""] )["rougeLsum"]
assert new_score > prev_score
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_lowerCAmelCase : int = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
_lowerCAmelCase : Any = calculate_rouge_path(data_dir.joinpath("""test.source""" ) ,data_dir.joinpath("""test.target""" ) )
assert isinstance(__snake_case ,__snake_case )
_lowerCAmelCase : List[str] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) ,data_dir.joinpath("""test.target""" ) ,bootstrap_aggregation=__snake_case )
assert isinstance(__snake_case ,__snake_case )
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
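    # Builds small TransfoXL configs and dummy input tensors reused by the test cases below.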
def __init__( self , a__ , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.0_1
def __A ( self ):
        input_ids_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_b = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_a, input_ids_b, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=16 , a__=36 , a__=6 , a__=6 , a__=6 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : int = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Optional[Any] = seq_length
_lowerCAmelCase : Optional[Any] = is_training
_lowerCAmelCase : str = use_input_mask
_lowerCAmelCase : List[str] = use_token_type_ids
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Tuple = vocab_size
_lowerCAmelCase : Optional[int] = embedding_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Tuple = num_hidden_groups
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[Any] = num_labels
_lowerCAmelCase : Optional[int] = num_choices
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Dict = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = AlbertModel(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase : Tuple = model(__A , attention_mask=__A , token_type_ids=__A )
_lowerCAmelCase : Optional[Any] = model(__A , token_type_ids=__A )
_lowerCAmelCase : List[Any] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = AlbertForPreTraining(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , sentence_order_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = AlbertForMaskedLM(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase : List[Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Dict = AlbertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase : Optional[int] = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Any = AlbertForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCAmelCase : str = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = self.num_labels
_lowerCAmelCase : str = AlbertForTokenClassification(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase : Dict = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = self.num_choices
_lowerCAmelCase : Optional[Any] = AlbertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[Any] = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Dict = config_and_inputs
_lowerCAmelCase : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : Dict = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
def __A ( self , a__ , a__ , a__=False ):
_lowerCAmelCase : int = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
_lowerCAmelCase : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
_lowerCAmelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = AlbertModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self , config_class=__A , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __A ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def __A ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : int = type
self.model_tester.create_and_check_model(*__A )
@slow
def __A ( self ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = AlbertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
class __A ( unittest.TestCase ):
@slow
def __A ( self ):
_lowerCAmelCase : int = AlbertModel.from_pretrained("""albert-base-v2""" )
_lowerCAmelCase : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Tuple = model(__A , attention_mask=__A )[0]
_lowerCAmelCase : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
_lowerCAmelCase : List[str] = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __A ( TestCase ):
def __A ( self , a__ ):
with open(__A , encoding="""utf-8""" ) as input_file:
_lowerCAmelCase : Optional[int] = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
_lowerCAmelCase : List[Any] = input_file.read()
_lowerCAmelCase : Tuple = regexp.search(__A )
return match
def __A ( self , a__ ):
with open(__A , encoding="""utf-8""" ) as input_file:
_lowerCAmelCase : Union[str, Any] = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
_lowerCAmelCase : Tuple = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_lowerCAmelCase : Optional[Any] = regexp.finditer(__A )
_lowerCAmelCase : Any = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __A ( self ):
_lowerCAmelCase : Optional[int] = Path("""./datasets""" )
_lowerCAmelCase : int = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__A ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = Path("""./datasets""" )
_lowerCAmelCase : Any = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__A ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
class __A ( PretrainedConfig ):
_UpperCamelCase : str = '''encoder-decoder'''
_UpperCamelCase : List[Any] = True
def __init__( self , **a__ ):
super().__init__(**__SCREAMING_SNAKE_CASE )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_lowerCAmelCase : int = kwargs.pop("""encoder""" )
_lowerCAmelCase : Union[str, Any] = encoder_config.pop("""model_type""" )
_lowerCAmelCase : int = kwargs.pop("""decoder""" )
_lowerCAmelCase : str = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase : Tuple = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
_lowerCAmelCase : List[str] = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
_lowerCAmelCase : Dict = True
@classmethod
def __A ( cls , a__ , a__ , **a__ ):
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Union[str, Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def __A ( self ):
_lowerCAmelCase : str = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : Optional[Any] = self.encoder.to_dict()
_lowerCAmelCase : List[str] = self.decoder.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : Any = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use SegformerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 663 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : int = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = "gptj"
_UpperCamelCase : Union[str, Any] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , a__=50400 , a__=2048 , a__=4096 , a__=28 , a__=16 , a__=64 , a__=None , a__="gelu_new" , a__=0.0 , a__=0.0 , a__=0.0 , a__=1e-5 , a__=0.0_2 , a__=True , a__=50256 , a__=50256 , a__=False , **a__ , ):
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : int = n_positions
_lowerCAmelCase : List[Any] = n_embd
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Union[str, Any] = n_head
_lowerCAmelCase : List[Any] = n_inner
_lowerCAmelCase : Optional[Any] = rotary_dim
_lowerCAmelCase : Optional[int] = activation_function
_lowerCAmelCase : Union[str, Any] = resid_pdrop
_lowerCAmelCase : List[str] = embd_pdrop
_lowerCAmelCase : Optional[Any] = attn_pdrop
_lowerCAmelCase : str = layer_norm_epsilon
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[int] = use_cache
_lowerCAmelCase : int = bos_token_id
_lowerCAmelCase : Dict = eos_token_id
super().__init__(
bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ = "default" , a__ = None , a__ = False , ):
super().__init__(__A , task=__A , patching_specs=__A , use_past=__A )
if not getattr(self._config , """pad_token_id""" , __A ):
# TODO: how to do that better?
_lowerCAmelCase : Dict = 0
@property
def __A ( self ):
_lowerCAmelCase : int = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__A , direction="""inputs""" )
_lowerCAmelCase : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
_lowerCAmelCase : Any = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __A ( self ):
return self._config.n_layer
@property
def __A ( self ):
return self._config.n_head
def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ):
_lowerCAmelCase : Tuple = super(__A , self ).generate_dummy_inputs(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
# We need to order the input in the way they appears in the forward()
_lowerCAmelCase : List[str] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Any = seqlen + 2
_lowerCAmelCase : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCAmelCase : List[str] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
_lowerCAmelCase : str = common_inputs["""attention_mask"""]
if self.use_past:
_lowerCAmelCase : int = ordered_inputs["""attention_mask"""].dtype
_lowerCAmelCase : Union[str, Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__A , __A , dtype=__A )] , dim=1 )
return ordered_inputs
@property
def __A ( self ):
return 13
| 718 |
"""simple docstring"""
import argparse
import json
import subprocess
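# Queries the GitHub Actions API for the requested self-hosted runners and raises if any of
# them is reported as offline; the offending runners are also dumped to offline_runners.txt.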
def get_runner_status(target_runners ,token ):
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd ,shell=True ,stdout=subprocess.PIPE )
    output_text = output.stdout.decode("""utf-8""" )
    status = json.loads(output_text )
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""" ,"""w""" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners] )
        raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
    def list_str(values ):
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( nums : list[int] ) -> float:
    if not nums: # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(nums ) / len(nums ) # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow log verbosity (assumed intent; matches the otherwise unused `os` import)
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 720 |
"""simple docstring"""
def solution( limit : int = 1000000 ) -> int:
    phi = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
            for j in range(2 * i ,limit + 1 ,i ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
_a : Union[str, Any] = 1.60_21e-19 # units = C
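# Solves sigma = n * e * mu for whichever of conductivity, electron concentration or mobility
# is passed in as 0; exactly one of the three inputs must be zero.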
def SCREAMING_SNAKE_CASE ( conductivity : float ,electron_conc : float ,mobility : float ,) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( BaseOutput ):
_UpperCamelCase : torch.FloatTensor
_UpperCamelCase : Optional[torch.FloatTensor] = None
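# Builds a beta schedule from a cumulative-noise function alpha_bar(t); the transform type
# selects either the cosine or the exponential alpha_bar recipe.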
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict=0.9_99 ,_lowerCamelCase : List[str]="cosine" ,) -> List[Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase : List[Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
_lowerCAmelCase : Optional[Any] = []
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = i / num_diffusion_timesteps
_lowerCAmelCase : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) ,_lowerCamelCase ) )
return torch.tensor(_lowerCamelCase ,dtype=torch.floataa )
class __A ( SchedulerMixin , ConfigMixin ):
_UpperCamelCase : List[Any] = 1
@register_to_config
def __init__( self , a__ = 1000 , a__ = 0.0_0_0_1 , a__ = 0.0_2 , a__ = "linear" , a__ = None , a__ = True , a__ = True , a__ = 0 , a__ = "epsilon" , a__ = 1.0 , **a__ , ):
if kwargs.get("""set_alpha_to_one""" , UpperCamelCase__ ) is not None:
_lowerCAmelCase : Any = (
"""The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."""
)
deprecate("""set_alpha_to_one""" , """1.0.0""" , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
_lowerCAmelCase : str = kwargs["""set_alpha_to_one"""]
if trained_betas is not None:
_lowerCAmelCase : Union[str, Any] = torch.tensor(UpperCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCAmelCase : List[Any] = torch.linspace(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase : Dict = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase : List[str] = betas_for_alpha_bar(UpperCamelCase__ )
else:
raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" )
_lowerCAmelCase : Dict = 1.0 - self.betas
_lowerCAmelCase : List[Any] = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_lowerCAmelCase : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_lowerCAmelCase : Union[str, Any] = 1.0
# setable values
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = torch.from_numpy(np.arange(0 , UpperCamelCase__ ).copy().astype(np.intaa ) )
def __A ( self , a__ , a__ = None ):
return sample
def __A ( self , a__ , a__ = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
F" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
F" maximal {self.config.num_train_timesteps} timesteps." )
_lowerCAmelCase : List[Any] = num_inference_steps
_lowerCAmelCase : List[Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase : Optional[int] = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round().copy().astype(np.intaa )
_lowerCAmelCase : int = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
self.timesteps += self.config.steps_offset
def __A ( self , a__ , a__ , a__ , a__ = 0.0 , a__ = False , a__ = None , a__ = True , ):
_lowerCAmelCase : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_lowerCAmelCase : int = self.alphas_cumprod[timestep]
_lowerCAmelCase : Any = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_lowerCAmelCase : List[str] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_lowerCAmelCase : str = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_lowerCAmelCase : Optional[int] = model_output
elif self.config.prediction_type == "sample":
_lowerCAmelCase : List[Any] = model_output
_lowerCAmelCase : Union[str, Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
""" `v_prediction`""" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_lowerCAmelCase : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase : Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def __len__( self ):
return self.config.num_train_timesteps
| 700 |
"""simple docstring"""
from PIL import Image
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ,_lowerCamelCase : int ) -> Image:
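    # compute the contrast correction factor for the requested level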
_lowerCAmelCase : Any = (259 * (level + 255)) / (255 * (259 - level))
def contrast(_lowerCamelCase : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(_lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
_a : str = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Dict:
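    # validate the input, then count the set bits in its binary representation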
    if _lowerCamelCase < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif not isinstance(_lowerCamelCase , int ):
        raise TypeError("""Input value must be a 'int' type""" )
return bin(_lowerCamelCase ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
"""simple docstring"""
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A :
def __init__( self ):
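        # one FIFO list per priority level (priority 0 is served first)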
_lowerCAmelCase : Union[str, Any] = [
[],
[],
[],
]
def __A ( self , a__ , a__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(a__ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __A ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class __A :
def __init__( self ):
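        # single list; dequeue always removes the smallest element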
_lowerCAmelCase : int = []
def __A ( self , a__ ):
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(a__ )
def __A ( self ):
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
_lowerCAmelCase : int = min(self.queue )
self.queue.remove(a__ )
return data
def __str__( self ):
return str(self.queue )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> bool:
return len(set(SCREAMING_SNAKE_CASE_ ) ) == len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : List[str] ) -> int:
return abs(SCREAMING_SNAKE_CASE_ ) if a == 0 else greatest_common_divisor(b % a ,SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Union[str, Any] ) -> int:
    while y: # --> when y = 0, the loop terminates and x is returned as the final GCD.
_lowerCAmelCase : Union[str, Any] = y, x % y
return abs(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( ) -> Any:
try:
_lowerCAmelCase : Optional[int] = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
_lowerCAmelCase : Dict = int(nums[0] )
_lowerCAmelCase : str = int(nums[1] )
print(
f"greatest_common_divisor({num_a}, {num_a}) = "
f"{greatest_common_divisor(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )}" )
print(f"By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )}" )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
| 703 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
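    # round the height/width up so the latent dimensions match the movq scale factor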
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
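    # resize the PIL image and normalize it to a [-1, 1] CHW tensor with a batch dimension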
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
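        # encode the input image into movq latents and noise them to the starting timestep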
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
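        # classifier-free guidance is enabled whenever guidance_scale > 1.0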
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 0 |
"""simple docstring"""
from manim import *
class __A ( __A ):
def __A ( self ):
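        # lay out the CPU, GPU, model and checkpoint memory blocks for the animation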
_lowerCAmelCase : Optional[int] = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase : List[Any] = Rectangle(height=0.2_5 , width=0.2_5 )
_lowerCAmelCase : Optional[int] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[str] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Tuple = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Optional[int] = Text("""CPU""" , font_size=24 )
_lowerCAmelCase : Any = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
_lowerCAmelCase : Any = [mem.copy() for i in range(4 )]
_lowerCAmelCase : List[str] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = Text("""GPU""" , font_size=24 )
_lowerCAmelCase : Optional[int] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
_lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Optional[int] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = Text("""Model""" , font_size=24 )
_lowerCAmelCase : Any = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = []
for i, rect in enumerate(a__ ):
rect.set_stroke(a__ )
_lowerCAmelCase : Tuple = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=a__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a__ , buff=0.0 )
self.add(a__ )
model_cpu_arr.append(a__ )
self.add(*a__ , *a__ , *a__ )
_lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Tuple = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase : Union[str, Any] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(a__ )
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : List[Any] = []
for i, rect in enumerate(a__ ):
_lowerCAmelCase : Tuple = fill.copy().set_fill(a__ , opacity=0.7 )
target.move_to(a__ )
ckpt_arr.append(a__ )
_lowerCAmelCase : Tuple = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a__ )
self.add(*a__ , *a__ )
_lowerCAmelCase : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase : Tuple = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
_lowerCAmelCase : Any = MarkupText(
F"<span fgcolor=\'{BLUE}\'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a__ )
_lowerCAmelCase : Any = MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
_lowerCAmelCase : List[Any] = [meta_mem.copy() for i in range(6 )]
_lowerCAmelCase : Union[str, Any] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : str = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Any = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = Text("""Disk""" , font_size=24 )
_lowerCAmelCase : Dict = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(a__ , run_time=3 ) , Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) )
_lowerCAmelCase : Optional[int] = []
for i, rect in enumerate(a__ ):
_lowerCAmelCase : str = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a__ , run_time=1.5 ) )
self.play(*a__ )
self.play(FadeOut(a__ ) )
_lowerCAmelCase : Any = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ , run_time=3 ) )
self.play(
FadeOut(a__ , a__ , *a__ , *a__ ) , )
self.wait()
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
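        # assemble lightweight dummy components so the pipeline test runs quickly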
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : str ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Any ) -> Dict:
# Load configuration defined in the metadata file
with open(__snake_case ) as metadata_file:
_lowerCAmelCase : Union[str, Any] = json.load(__snake_case )
_lowerCAmelCase : str = LukeConfig(use_entity_aware_attention=__snake_case ,**metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_lowerCAmelCase : List[str] = torch.load(__snake_case ,map_location="""cpu""" )
# Load the entity vocab file
_lowerCAmelCase : Tuple = load_entity_vocab(__snake_case )
_lowerCAmelCase : int = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCAmelCase : str = AddedToken("""<ent>""" ,lstrip=__snake_case ,rstrip=__snake_case )
_lowerCAmelCase : Tuple = AddedToken("""<ent2>""" ,lstrip=__snake_case ,rstrip=__snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(__snake_case )
with open(os.path.join(__snake_case ,LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) ,"""w""" ) as f:
json.dump(__snake_case ,__snake_case )
_lowerCAmelCase : Dict = LukeTokenizer.from_pretrained(__snake_case )
# Initialize the embeddings of the special tokens
_lowerCAmelCase : int = state_dict["""embeddings.word_embeddings.weight"""]
_lowerCAmelCase : Tuple = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
_lowerCAmelCase : List[Any] = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
_lowerCAmelCase : Dict = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCAmelCase : Tuple = f"encoder.layer.{layer_index}.attention.self."
_lowerCAmelCase : int = state_dict[prefix + matrix_name]
_lowerCAmelCase : int = state_dict[prefix + matrix_name]
_lowerCAmelCase : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCAmelCase : Any = state_dict["""entity_embeddings.entity_embeddings.weight"""]
_lowerCAmelCase : Union[str, Any] = entity_emb[entity_vocab["""[MASK]"""]]
_lowerCAmelCase : Union[str, Any] = LukeModel(config=__snake_case ).eval()
_lowerCAmelCase , _lowerCAmelCase : str = model.load_state_dict(__snake_case ,strict=__snake_case )
if not (len(__snake_case ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"Missing keys {', '.join(__snake_case )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
# Check outputs
_lowerCAmelCase : List[str] = LukeTokenizer.from_pretrained(__snake_case ,task="""entity_classification""" )
_lowerCAmelCase : Tuple = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
_lowerCAmelCase : Optional[Any] = (39, 42)
_lowerCAmelCase : List[str] = tokenizer(__snake_case ,entity_spans=[span] ,add_prefix_space=__snake_case ,return_tensors="""pt""" )
_lowerCAmelCase : Tuple = model(**__snake_case )
# Verify word hidden states
if model_size == "large":
_lowerCAmelCase : str = torch.Size((1, 42, 1024) )
_lowerCAmelCase : List[str] = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
_lowerCAmelCase : Union[str, Any] = torch.Size((1, 42, 768) )
_lowerCAmelCase : List[Any] = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__snake_case ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCAmelCase : Optional[int] = torch.Size((1, 1, 1024) )
_lowerCAmelCase : List[Any] = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
_lowerCAmelCase : Union[str, Any] = torch.Size((1, 1, 768) )
_lowerCAmelCase : Optional[int] = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__snake_case ,atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__snake_case ) )
model.save_pretrained(__snake_case )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Union[str, Any]:
_lowerCAmelCase : int = {}
with open(__snake_case ,"""r""" ,encoding="""utf-8""" ) as f:
for index, line in enumerate(__snake_case ):
_lowerCAmelCase , _lowerCAmelCase : int = line.rstrip().split("""\t""" )
_lowerCAmelCase : Any = index
return entity_vocab
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_a : Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 0 |